Commit d9393973 authored by: Thomas Hellström

drm/i915: Remove the vma refcount

Now that i915_vma_parked() takes the object lock on vma destruction,
and the only user of the vma refcount, i915_gem_object_unbind(),
also takes the object lock, remove the vma refcount.

v3: Documentation update.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220304082641.308069-3-thomas.hellstrom@linux.intel.com
Parent e1a7ab4f
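The locking argument behind the removal can be illustrated outside the kernel. Below is a minimal userspace sketch, not driver code; every name in it (fake_object, fake_vma, parked_worker, unbind_worker) is made up for illustration. The point it demonstrates is the one the commit relies on: when every path that may free the "vma" and every path that uses it take the same object lock, the user no longer needs a temporary refcount to keep the vma alive across its critical section.

```c
/*
 * Userspace analogue of the locking argument in this commit: if every path
 * that can free a "vma" takes the same object lock as every path that uses
 * it, no per-vma refcount is needed. All names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_vma {
	int bound;
};

struct fake_object {
	pthread_mutex_t lock;	/* stands in for the GEM object lock */
	struct fake_vma *vma;	/* may be freed, but only under 'lock' */
};

/* Analogue of i915_vma_parked(): destruction happens under the object lock. */
static void *parked_worker(void *arg)
{
	struct fake_object *obj = arg;

	pthread_mutex_lock(&obj->lock);
	free(obj->vma);
	obj->vma = NULL;
	pthread_mutex_unlock(&obj->lock);
	return NULL;
}

/* Analogue of i915_gem_object_unbind(): also works under the object lock. */
static void *unbind_worker(void *arg)
{
	struct fake_object *obj = arg;

	pthread_mutex_lock(&obj->lock);
	/*
	 * The vma cannot be freed from under us here: the only destroyer
	 * also needs obj->lock. Hence no temporary reference (the old
	 * __i915_vma_get()) is required across this critical section.
	 */
	if (obj->vma)
		obj->vma->bound = 0;
	pthread_mutex_unlock(&obj->lock);
	return NULL;
}

int main(void)
{
	struct fake_object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t t1, t2;

	obj.vma = calloc(1, sizeof(*obj.vma));
	obj.vma->bound = 1;

	pthread_create(&t1, NULL, unbind_worker, &obj);
	pthread_create(&t2, NULL, parked_worker, &obj);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("vma %s\n", obj.vma ? "still present" : "destroyed");
	return 0;
}
```

Whichever thread wins the lock, the other one observes a consistent state; that is the serialization the commit message describes, with the mutex playing the role of the object lock.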
......@@ -151,14 +151,25 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
break;
}
/*
* Requiring the vm destructor to take the object lock
* before destroying a vma would help us eliminate the
* i915_vm_tryget() here, AND thus also the barrier stuff
* at the end. That's an easy fix, but sleeping locks in
* a kthread should generally be avoided.
*/
ret = -EAGAIN;
if (!i915_vm_tryget(vma->vm))
break;
/* Prevent vma being freed by i915_vma_parked as we unbind */
vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
/*
* Since i915_vma_parked() takes the object lock
* before vma destruction, it won't race us here,
* and destroy the vma from under us.
*/
if (vma) {
bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
ret = -EBUSY;
......@@ -180,8 +191,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
ret = i915_vma_unbind(vma);
}
}
__i915_vma_put(vma);
}
i915_vm_put(vma->vm);
......
......@@ -122,7 +122,6 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
kref_init(&vma->ref);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->size = obj->base.size;
......@@ -1628,15 +1627,6 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
void i915_vma_release(struct kref *ref)
{
struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
}
static void force_unbind(struct i915_vma *vma)
{
if (!drm_mm_node_allocated(&vma->node))
......@@ -1665,7 +1655,9 @@ static void release_references(struct i915_vma *vma, bool vm_ddestroy)
if (vm_ddestroy)
i915_vm_resv_put(vma->vm);
__i915_vma_put(vma);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
}
/**
......@@ -1693,9 +1685,6 @@ static void release_references(struct i915_vma *vma, bool vm_ddestroy)
* - vm->mutex
* - obj->vma.lock
* - gt->closed_lock
*
* A vma user can also temporarily keep the vma alive while holding a vma
* reference.
*/
void i915_vma_destroy_locked(struct i915_vma *vma)
{
......
......@@ -222,20 +222,6 @@ void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
{
if (kref_get_unless_zero(&vma->ref))
return vma;
return NULL;
}
void i915_vma_release(struct kref *ref);
static inline void __i915_vma_put(struct i915_vma *vma)
{
kref_put(&vma->ref, i915_vma_release);
}
void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
......
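For reference, the helpers deleted from the header hunk above were thin wrappers around the kernel's kref primitives: __i915_vma_get() was kref_get_unless_zero() ("take a reference only if the object is not already being torn down") and __i915_vma_put() was kref_put() with i915_vma_release() as the release callback, as the removed lines show. The sketch below is a self-contained userspace mimic of that pattern with made-up names (tiny_ref, tiny_ref_get_unless_zero, tiny_ref_put); the i915_vm_tryget() call that remains in the first hunk follows the same conditional-get idea, just for the vm rather than the vma.

```c
/*
 * Userspace mimic of the kref pattern the deleted helpers wrapped. The
 * names (tiny_ref, tiny_ref_get_unless_zero, tiny_ref_put) are invented
 * for illustration; in the kernel this is kref_init()/
 * kref_get_unless_zero()/kref_put().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tiny_ref {
	atomic_int count;
};

static void tiny_ref_init(struct tiny_ref *r)
{
	atomic_store(&r->count, 1);
}

/*
 * "Tryget": only take a reference if teardown has not started yet
 * (the count has not dropped to zero).
 */
static bool tiny_ref_get_unless_zero(struct tiny_ref *r)
{
	int old = atomic_load(&r->count);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&r->count, &old, old + 1));

	return true;
}

/* Drop a reference; returns true when the caller dropped the last one. */
static bool tiny_ref_put(struct tiny_ref *r)
{
	return atomic_fetch_sub(&r->count, 1) == 1;
}

int main(void)
{
	struct tiny_ref ref;

	tiny_ref_init(&ref);			/* count = 1 */
	if (tiny_ref_get_unless_zero(&ref))	/* count = 2 */
		printf("got a reference\n");
	if (!tiny_ref_put(&ref))		/* count = 1 */
		printf("still referenced\n");
	if (tiny_ref_put(&ref))			/* count = 0 */
		printf("last reference dropped, release the object\n");
	if (!tiny_ref_get_unless_zero(&ref))
		printf("tryget fails once teardown has begun\n");
	return 0;
}
```

With the object-lock serialization in place, the vma no longer needs this machinery at all, which is why the struct kref member is also dropped from struct i915_vma in the final hunk.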
......@@ -211,7 +211,6 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
struct kref ref;
atomic_t open_count;
atomic_t flags;
/**
......