Commit 00d041d0 authored by Ben Skeggs

drm/nouveau: queue delayed unmapping of VMAs on client workqueue

VMAs are about to not take references on the VMM they belong to, which
means more care is required when handling delayed unmapping.

Queuing it on the client workqueue ensures all pending VMA unmaps will
have completed before the VMM is destroyed.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent 814a2324
@@ -90,19 +90,33 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	return ret;
 }
 
+struct nouveau_gem_object_unmap {
+	struct nouveau_cli_work work;
+	struct nouveau_vma *vma;
+};
+
 static void
-nouveau_gem_object_delete(void *data)
+nouveau_gem_object_delete(struct nouveau_vma *vma)
 {
-	struct nouveau_vma *vma = data;
 	nouveau_vma_del(&vma);
 }
 
+static void
+nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
+{
+	struct nouveau_gem_object_unmap *work =
+		container_of(w, typeof(*work), work);
+	nouveau_gem_object_delete(work->vma);
+	kfree(work);
+}
+
 static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
+	struct nouveau_gem_object_unmap *work;
 	struct dma_fence *fence = NULL;
 
 	fobj = reservation_object_get_list(resv);
@@ -117,10 +131,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 	else
 		fence = reservation_object_get_excl(nvbo->bo.resv);
 
-	if (fence && mapped)
-		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
-	else
-		nouveau_vma_del(&vma);
+	if (!fence || !mapped) {
+		nouveau_gem_object_delete(vma);
+		return;
+	}
+
+	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
+		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+		nouveau_gem_object_delete(vma);
+		return;
+	}
+
+	work->work.func = nouveau_gem_object_delete_work;
+	work->vma = vma;
+	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 }
 
 void
...
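For context, here is a minimal, self-contained sketch of the general pattern the commit relies on: deferred teardown is packaged as a work item on a per-client workqueue, and the client flushes that queue before the owning object goes away, which is what guarantees that every pending unmap has finished first. This is not the actual nouveau code; names such as demo_client and demo_unmap_work are illustrative, and the fence coupling that nouveau_cli_work_queue() performs in the real driver is omitted.

/*
 * Illustrative sketch only: per-client workqueue with deferred
 * teardown, flushed before the client is destroyed.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_client {
	struct workqueue_struct *wq;	/* per-client workqueue */
};

struct demo_unmap_work {
	struct work_struct work;
	void *vma;			/* stands in for struct nouveau_vma */
};

static void demo_unmap_fn(struct work_struct *w)
{
	/* Recover the wrapper from the embedded work_struct. */
	struct demo_unmap_work *uw = container_of(w, typeof(*uw), work);

	/* ... tear down uw->vma here ... */
	kfree(uw);
}

static int demo_queue_unmap(struct demo_client *cli, void *vma)
{
	struct demo_unmap_work *uw = kmalloc(sizeof(*uw), GFP_KERNEL);

	if (!uw)
		return -ENOMEM;	/* caller falls back to synchronous teardown */

	INIT_WORK(&uw->work, demo_unmap_fn);
	uw->vma = vma;
	queue_work(cli->wq, &uw->work);
	return 0;
}

static void demo_client_fini(struct demo_client *cli)
{
	/* All queued unmaps have completed once the flush returns. */
	flush_workqueue(cli->wq);
	destroy_workqueue(cli->wq);
}

The same reasoning explains the kmalloc-failure branch in the patch: when no work item can be allocated, the code cannot defer, so it waits on the fence directly (warning if that takes more than two seconds) and deletes the VMA synchronously.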