Commit 9786b65b authored by Gerd Hoffmann

drm/ttm: fix mmap refcounting

When mapping ttm objects via the drm_gem_ttm_mmap() helper,
drm_gem_mmap_obj() will take an object reference.  That reference
is never released because ttm has its own reference counting.

Fix that by dropping the gem object reference once the ttm mmap
has completed (and the ttm refcount has been bumped).

For that to work properly, the drm_gem_object_get() call in
drm_gem_mmap_obj() must be moved so it happens before calling
obj->funcs->mmap(); otherwise the gem refcount would drop to
zero.

Fixes: 231927d9 ("drm/ttm: add drm_gem_ttm_mmap()")
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: http://patchwork.freedesktop.org/patch/msgid/20191113135612.19679-1-kraxel@redhat.com
Parent a64fc11b
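
As a rough sketch of the reference flow the diff below establishes (simplified pseudo-C; the *_sketch names are placeholders, the obj_size check and other details are abbreviated, so this is not the actual patch): drm_gem_mmap_obj() takes the gem reference before invoking the object's mmap callback, and for ttm-backed objects drm_gem_ttm_mmap() drops that reference again once ttm_bo_mmap_obj() has taken ttm's own reference, leaving the net count balanced.

/*
 * Simplified sketch, not the actual patch: shows where the gem reference
 * is taken and where it is dropped again for ttm-backed objects.
 */
static int drm_gem_mmap_obj_sketch(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;

	drm_gem_object_get(obj);		/* ref taken before the callback */

	ret = obj->funcs->mmap(obj, vma);	/* e.g. drm_gem_ttm_mmap() */
	if (ret) {
		drm_gem_object_put_unlocked(obj); /* balance the get on error */
		return ret;
	}
	return 0;
}

static int drm_gem_ttm_mmap_sketch(struct drm_gem_object *gem,
				   struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
	int ret;

	ret = ttm_bo_mmap_obj(vma, bo);		/* bumps ttm's own refcount */
	if (ret < 0)
		return ret;

	drm_gem_object_put_unlocked(gem);	/* drop the now-redundant gem ref */
	return 0;
}
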
@@ -1105,21 +1105,33 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	if (obj_size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
 
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_get(obj);
+
 	if (obj->funcs && obj->funcs->mmap) {
 		/* Remove the fake offset */
 		vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
 
 		ret = obj->funcs->mmap(obj, vma);
-		if (ret)
+		if (ret) {
+			drm_gem_object_put_unlocked(obj);
 			return ret;
+		}
 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
 	} else {
 		if (obj->funcs && obj->funcs->vm_ops)
 			vma->vm_ops = obj->funcs->vm_ops;
 		else if (dev->driver->gem_vm_ops)
 			vma->vm_ops = dev->driver->gem_vm_ops;
-		else
+		else {
+			drm_gem_object_put_unlocked(obj);
 			return -EINVAL;
+		}
 
 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
@@ -1128,14 +1140,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 
 	vma->vm_private_data = obj;
 
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_get(obj);
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
@@ -64,8 +64,19 @@ int drm_gem_ttm_mmap(struct drm_gem_object *gem,
 		     struct vm_area_struct *vma)
 {
 	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+	int ret;
 
-	return ttm_bo_mmap_obj(vma, bo);
+	ret = ttm_bo_mmap_obj(vma, bo);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * ttm has its own object refcounting, so drop the gem reference
+	 * to avoid double accounting.
+	 */
+	drm_gem_object_put_unlocked(gem);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_gem_ttm_mmap);
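
For context, a hedged sketch of how a ttm-based driver might hook the helper up (the struct name is a placeholder and the other callbacks are omitted; this is not part of the patch): drm_gem_ttm_mmap() is installed as the gem object's .mmap callback, so every userspace mmap of such an object goes through the reference taken in drm_gem_mmap_obj() and the balancing put added above.

/* Hypothetical example, not part of this patch: the helper as .mmap callback. */
static const struct drm_gem_object_funcs example_ttm_gem_funcs = {
	/* .free, .print_info and the other callbacks are omitted here */
	.mmap = drm_gem_ttm_mmap,
};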