diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d182058383a9ef7264339101265e511ac8fa0609..1719078c763ac0d140c8fb7a9e954862bd8d3bc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,7 +113,10 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
 			continue;
 
 		obj = mo->obj;
-		drm_gem_object_reference(&obj->base);
+
+		if (!kref_get_unless_zero(&obj->base.refcount))
+			continue;
+
 		spin_unlock(&mn->lock);
 
 		cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 			it = interval_tree_iter_first(&mn->objects, start, end);
 		if (it != NULL) {
 			obj = container_of(it, struct i915_mmu_object, it)->obj;
-			drm_gem_object_reference(&obj->base);
+
+			/* The mmu_object is released late when destroying the
+			 * GEM object so it is entirely possible to gain a
+			 * reference on an object in the process of being freed
+			 * since our serialisation is via the spinlock and not
+			 * the struct_mutex - and consequently use it after it
+			 * is freed and then double free it.
+			 */
+			if (!kref_get_unless_zero(&obj->base.refcount)) {
+				spin_unlock(&mn->lock);
+				serial = 0;
+				continue;
+			}
+
 			serial = mn->serial;
 		}
 		spin_unlock(&mn->lock);
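
For illustration only, here is a minimal standalone sketch of the pattern the patch switches to: looking an object up under a spinlock and taking a reference with kref_get_unless_zero() so that an object already on its way to being freed is skipped rather than resurrected. The struct names (my_table, my_object) and the helper my_table_lookup() are hypothetical and not part of the i915 driver; they stand in for the mmu-notifier interval/linear lists in the patch above.

/* Illustrative sketch, not i915 code: safe lookup + reference under a
 * spinlock. kref_get_unless_zero() fails once the refcount has dropped
 * to zero, so a concurrent release cannot lead to a use-after-free or
 * a double free.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_object {
	struct kref refcount;
	struct list_head link;
	unsigned long start, last;
};

struct my_table {
	spinlock_t lock;
	struct list_head objects;
};

/* Return a referenced object overlapping [start, end], or NULL. */
static struct my_object *my_table_lookup(struct my_table *t,
					 unsigned long start,
					 unsigned long end)
{
	struct my_object *obj, *found = NULL;

	spin_lock(&t->lock);
	list_for_each_entry(obj, &t->objects, link) {
		if (obj->last < start || obj->start > end)
			continue;

		/* Only take the reference if the object is still live;
		 * otherwise skip it, exactly as the patch does with
		 * obj->base.refcount.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		found = obj;
		break;
	}
	spin_unlock(&t->lock);

	return found;
}

The caller owns the reference on success and must drop it with kref_put() once it is done, mirroring how the patch releases the GEM reference after cancel_userptr() completes.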