diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9143129cd85186b0b8fbf4df1e2a097ddee39280..086396e16de71ce594820c46331c32081242b28e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3287,7 +3287,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_unpin_pages(obj);
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+	I915_MM_NORMAL = 0,
+	I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass);
 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 
 enum i915_map_type {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1568f67564304468de53b6dce781cddff66755fd..cbbfaa7761b94015fa25b3ef93271d203c064cad 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -491,7 +491,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	__i915_gem_object_put_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 	if (obj->mm.pages)
 		return -EBUSY;
 
@@ -2181,7 +2181,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass)
 {
 	struct sg_table *pages;
 
@@ -2193,7 +2194,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 		return;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&obj->mm.lock, subclass);
 
 	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
 		goto unlock;
@@ -4283,7 +4284,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 			atomic_set(&obj->mm.pages_pin_count, 0);
-		__i915_gem_object_put_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		GEM_BUG_ON(obj->mm.pages);
 
 		if (obj->base.import_attach)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 0993afc0e725b75749e06ffee0d2a86641a4b31d..f988652f1e26c5de87407272c752abd4f6d5e65a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -111,7 +111,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
 	if (i915_gem_object_unbind(obj) == 0)
-		__i915_gem_object_put_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
 	return !READ_ONCE(obj->mm.pages);
 }
 
@@ -225,7 +225,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
-						  SINGLE_DEPTH_NESTING);
+						  I915_MM_SHRINKER);
 				if (!obj->mm.pages) {
 					__i915_gem_object_invalidate(obj);
 					list_del_init(&obj->global_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c30d04f64670c55092d2519d89115f274d8b1f69..9bf44b5bca101448d2e71bd558cf464db27b4caa 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -75,7 +75,7 @@ static void cancel_userptr(struct work_struct *work)
 
 	/* We are inside a kthread context and can't be interrupted */
 	if (i915_gem_object_unbind(obj) == 0)
-		__i915_gem_object_put_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 	WARN_ONCE(obj->mm.pages,
 		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
 		  obj->bind_count,
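
Note on the locking pattern above: mutex_lock_nested() annotates a lockdep subclass so that two mutexes of the same lock class (here, the mm.lock of two different objects) can be held at once without lockdep reporting a false recursive deadlock. The patch replaces the fixed SINGLE_DEPTH_NESTING annotation with a caller-supplied subclass, so the shrinker path is distinguishable from ordinary callers. The sketch below is a minimal, hypothetical illustration of that pattern only; demo_obj, demo_put_pages(), demo_release() and demo_reclaim_other() are invented names, not i915 code.

/*
 * Minimal, hypothetical sketch of the lockdep-subclass pattern used by the
 * patch above.  Not i915 code: all demo_* names are invented for illustration.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

enum demo_mm_subclass {		/* lockdep subclass for demo_obj->lock */
	DEMO_MM_NORMAL = 0,	/* ordinary callers */
	DEMO_MM_SHRINKER	/* reclaim nesting inside another object's lock */
};

struct demo_obj {
	struct mutex lock;	/* same lock class for every demo_obj */
	void *pages;
};

static void demo_put_pages(struct demo_obj *obj, enum demo_mm_subclass subclass)
{
	/* The caller tells lockdep at which nesting level this lock is taken. */
	mutex_lock_nested(&obj->lock, subclass);
	kfree(obj->pages);
	obj->pages = NULL;
	mutex_unlock(&obj->lock);
}

/* Normal path: no other demo_obj lock is held, so use the default subclass. */
static void demo_release(struct demo_obj *obj)
{
	demo_put_pages(obj, DEMO_MM_NORMAL);
}

/*
 * Reclaim path: @busy->lock is already held (e.g. from within its own
 * get-pages step), so taking @victim->lock, which belongs to the same lock
 * class, must use a distinct subclass or lockdep flags a false deadlock.
 */
static void demo_reclaim_other(struct demo_obj *busy, struct demo_obj *victim)
{
	lockdep_assert_held(&busy->lock);
	demo_put_pages(victim, DEMO_MM_SHRINKER);
}

The diff follows the same shape: __i915_gem_object_put_pages() now takes the subclass from its caller, I915_MM_SHRINKER in the shrinker paths and I915_MM_NORMAL everywhere else, instead of hard-coding SINGLE_DEPTH_NESTING at the lock site.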