Commit 0f6e2825 authored by Jan Kara, committed by Mauro Carvalho Chehab

[media] vb2: Push mmap_sem down to memops

Currently vb2 core acquires mmap_sem just around the call to
__qbuf_userptr(). However, since commit f035eb4e (videobuf2: fix
lockdep warning) it isn't necessary to acquire it that early, as we no
longer have to drop the queue mutex before acquiring mmap_sem. So push
the acquisition of mmap_sem down into the .get_userptr memop so that the
semaphore is held for a shorter time and it is clearer what it is
needed for.
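
As an illustration of the resulting pattern, a minimal sketch is shown
below (example_get_userptr and its body are made up for illustration and
do not correspond to any particular allocator; the real changes are in
the hunks that follow):

static void *example_get_userptr(void *alloc_ctx, unsigned long vaddr,
				 unsigned long size,
				 enum dma_data_direction dma_dir)
{
	struct vm_area_struct *vma;
	void *buf = NULL;

	/* mmap_sem is now taken inside the memop, not in __buf_prepare() */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, vaddr);
	if (vma) {
		/* ... pin the user pages and build the allocator's buffer ... */
	}
	up_read(&current->mm->mmap_sem);

	return buf;	/* NULL on failure in this sketch */
}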

Note that we also need mmap_sem in the .put_userptr memop, since that ends
up calling vb2_put_vma(), which calls vma->vm_ops->close(), which should
be called with mmap_sem held. However, we didn't hold mmap_sem in some
code paths anyway (e.g. when called via vb2_ioctl_reqbufs() ->
__vb2_queue_free() -> vb2_dma_sg_put_userptr()), and taking mmap_sem in
put_userptr() introduces a lock inversion with queue->mmap_lock in the
call path mentioned above.
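
For illustration only, the two opposing lock orders can be sketched as
below (q_mmap_lock is a hypothetical stand-in for queue->mmap_lock and
neither function is code from this patch): mmap() is entered with
mmap_sem already held by the mm core and vb2 then takes the queue's mmap
lock below it, while the reqbufs free path already holds that lock when
it reaches .put_userptr, so taking mmap_sem there reverses the order:

static DEFINE_MUTEX(q_mmap_lock);	/* stand-in for queue->mmap_lock */

static void mmap_path(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);	/* held by the mm core across ->mmap() */
	mutex_lock(&q_mmap_lock);	/* queue mmap lock nests inside mmap_sem */
	mutex_unlock(&q_mmap_lock);
	up_read(&mm->mmap_sem);
}

static void reqbufs_free_path(struct mm_struct *mm)
{
	mutex_lock(&q_mmap_lock);	/* held around __vb2_queue_free() */
	down_read(&mm->mmap_sem);	/* what .put_userptr would need: inverted order */
	up_read(&mm->mmap_sem);
	mutex_unlock(&q_mmap_lock);
}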

Luckily, this whole locking mess will get resolved once we convert the
videobuf2 core to the new mm helper, which avoids the need for mmap_sem
in the .put_userptr memop altogether.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
Parent 27c03975
drivers/media/v4l2-core/videobuf2-core.c:
@@ -1681,9 +1681,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
- down_read(&current->mm->mmap_sem);
ret = __qbuf_userptr(vb, b);
- up_read(&current->mm->mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
drivers/media/v4l2-core/videobuf2-dma-contig.c:
@@ -616,6 +616,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
goto fail_buf;
}
+ down_read(&current->mm->mmap_sem);
/* current->mm->mmap_sem is taken by videobuf2 core */
vma = find_vma(current->mm, vaddr);
if (!vma) {
@@ -642,6 +643,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (ret) {
unsigned long pfn;
if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+ up_read(&current->mm->mmap_sem);
buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
buf->size = size;
kfree(pages);
@@ -651,6 +653,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
pr_err("failed to get user pages\n");
goto fail_vma;
}
+ up_read(&current->mm->mmap_sem);
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
@@ -713,10 +716,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
while (n_pages)
put_page(pages[--n_pages]);
+ down_read(&current->mm->mmap_sem);
fail_vma:
vb2_put_vma(buf->vma);
fail_pages:
+ up_read(&current->mm->mmap_sem);
kfree(pages); /* kfree is NULL-proof */
fail_buf:
drivers/media/v4l2-core/videobuf2-dma-sg.c:
@@ -264,6 +264,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf->pages)
goto userptr_fail_alloc_pages;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, vaddr);
if (!vma) {
dprintk(1, "no vma for address %lu\n", vaddr);
@@ -302,6 +303,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
1, /* force */
buf->pages,
NULL);
+ up_read(&current->mm->mmap_sem);
if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
@@ -331,8 +333,10 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!vma_is_io(buf->vma))
while (--num_pages_from_user >= 0)
put_page(buf->pages[num_pages_from_user]);
+ down_read(&current->mm->mmap_sem);
vb2_put_vma(buf->vma);
userptr_fail_find_vma:
+ up_read(&current->mm->mmap_sem);
kfree(buf->pages);
userptr_fail_alloc_pages:
kfree(buf);
drivers/media/v4l2-core/videobuf2-vmalloc.c:
@@ -89,7 +89,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
offset = vaddr & ~PAGE_MASK;
buf->size = size;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, vaddr);
if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
@@ -121,6 +121,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf->vaddr)
goto fail_get_user_pages;
}
+ up_read(&current->mm->mmap_sem);
buf->vaddr += offset;
return buf;
@@ -133,6 +134,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
kfree(buf->pages);
fail_pages_array_alloc:
+ up_read(&current->mm->mmap_sem);
kfree(buf);
return NULL;