Commit e92075ff, authored by Joonas Lahtinen

drm/i915: Simplify shrinker locking

By using the same structure for both interruptible and
uninterruptible locking in the shrinker code, combined with the
observation that mm.interruptible is only ever written to (and
never read), the code can be greatly simplified.

Also remove the i915_gem_ prefix from the locking functions so
that nobody in their wildest dreams considers exporting them.
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1491562175-27680-1-git-send-email-joonas.lahtinen@linux.intel.com
Parent 8f612d05
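
In short, callers of the uninterruptible path no longer carry a bookkeeping struct; they only track whether struct_mutex needs to be released. A condensed before/after of the calling pattern (taken from the hunks below, not a standalone compilable unit) looks like this:

	/* Before: a helper struct carried the unlock flag plus the saved
	 * mm.interruptible value that had to be restored on unlock.
	 */
	struct shrinker_lock_uninterruptible slu;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;
	/* ... reclaim ... */
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	/* After: a plain bool is enough, and the regular unlock helper is
	 * reused because mm.interruptible no longer needs to be saved.
	 */
	bool unlock;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;
	/* ... reclaim ... */
	shrinker_unlock(dev_priv, unlock);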
@@ -1511,12 +1511,6 @@ struct i915_gem_mm {
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
-	/**
-	 * Are we in a non-interruptible section of code like
-	 * modesetting?
-	 */
-	bool interruptible;
-
 	/* the indicator for dispatch video commands on two BSD rings */
 	atomic_t bsd_engine_dispatch_index;
...
@@ -4822,8 +4822,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
 
-	dev_priv->mm.interruptible = true;
-
 	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
 
 	spin_lock_init(&dev_priv->fb_tracking.lock);
...
@@ -35,9 +35,9 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 {
-	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
 	case MUTEX_TRYLOCK_FAILED:
 		return false;
 
@@ -53,12 +53,12 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	BUG();
 }
 
-static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
 {
 	if (!unlock)
 		return;
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	/* expedite the RCU grace period to free some request slabs */
 	synchronize_rcu_expedited();
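
For context, shrinker_lock() is built on mutex_trylock_recursive(); its SUCCESS and RECURSIVE cases are elided from the hunk above, so the following is only a sketch of the usual pattern (the helper name and comments are illustrative, and the elided upstream lines may differ):

	static bool shrinker_lock_sketch(struct drm_i915_private *dev_priv,
					 bool *unlock)
	{
		switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
		case MUTEX_TRYLOCK_FAILED:
			return false;		/* contended: skip shrinking */

		case MUTEX_TRYLOCK_SUCCESS:
			*unlock = true;		/* we took the lock, we must release it */
			return true;

		case MUTEX_TRYLOCK_RECURSIVE:
			*unlock = false;	/* already held by us: leave it locked */
			return true;
		}

		BUG();
	}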
@@ -156,7 +156,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	unsigned long count = 0;
 	bool unlock;
 
-	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+	if (!shrinker_lock(dev_priv, &unlock))
		return 0;
 
 	trace_i915_gem_shrink(dev_priv, target, flags);
@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	i915_gem_retire_requests(dev_priv);
 
-	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
+	shrinker_unlock(dev_priv, unlock);
 
 	return count;
 }
@@ -284,12 +284,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	unsigned long count;
 	bool unlock;
 
-	if (!i915_gem_shrinker_lock(dev, &unlock))
+	if (!shrinker_lock(dev_priv, &unlock))
 		return 0;
 
 	i915_gem_retire_requests(dev_priv);
@@ -304,7 +303,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
-	i915_gem_shrinker_unlock(dev, unlock);
+	shrinker_unlock(dev_priv, unlock);
 
 	return count;
 }
@@ -314,11 +313,10 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = &dev_priv->drm;
 	unsigned long freed;
 	bool unlock;
 
-	if (!i915_gem_shrinker_lock(dev, &unlock))
+	if (!shrinker_lock(dev_priv, &unlock))
 		return SHRINK_STOP;
 
 	freed = i915_gem_shrink(dev_priv,
@@ -332,26 +330,20 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
 
-	i915_gem_shrinker_unlock(dev, unlock);
+	shrinker_unlock(dev_priv, unlock);
 
 	return freed;
 }
 
-struct shrinker_lock_uninterruptible {
-	bool was_interruptible;
-	bool unlock;
-};
-
 static bool
-i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
-				       struct shrinker_lock_uninterruptible *slu,
-				       int timeout_ms)
+shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+			      int timeout_ms)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
 	do {
 		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
-		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+		    shrinker_lock(dev_priv, unlock))
 			break;
 
 		schedule_timeout_killable(1);
@@ -364,29 +356,19 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 		}
 	} while (1);
 
-	slu->was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
 	return true;
 }
 
-static void
-i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
-					 struct shrinker_lock_uninterruptible *slu)
-{
-	dev_priv->mm.interruptible = slu->was_interruptible;
-	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
-}
-
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
-	struct shrinker_lock_uninterruptible slu;
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
+	bool unlock;
 
-	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
 		return NOTIFY_DONE;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
@@ -415,7 +397,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
 
-	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+	shrinker_unlock(dev_priv, unlock);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
@@ -435,12 +417,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
-	struct shrinker_lock_uninterruptible slu;
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
+	bool unlock;
 	int ret;
 
-	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
@@ -465,7 +447,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	}
 
 out:
-	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+	shrinker_unlock(dev_priv, unlock);
 
 	*(unsigned long *)ptr += freed_pages;
 
 	return NOTIFY_DONE;
...
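
Note that shrinker_lock_uninterruptible() above keeps its retry-until-timeout loop; only the mm.interruptible bookkeeping is gone. A sketch of the full loop, with the timeout and signal handling that the hunk elides filled in as an assumption (it may not match the upstream lines verbatim):

	static bool
	shrinker_lock_uninterruptible_sketch(struct drm_i915_private *dev_priv,
					     bool *unlock, int timeout_ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

		do {
			/* Wait for the GPU to idle, then try to take struct_mutex. */
			if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
			    shrinker_lock(dev_priv, unlock))
				break;

			schedule_timeout_killable(1);
			if (fatal_signal_pending(current))
				return false;

			/* Assumed timeout handling; elided in the hunk above. */
			if (time_after(jiffies, timeout)) {
				pr_err("Unable to lock GPU to purge memory.\n");
				return false;
			}
		} while (1);

		return true;
	}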
@@ -4872,12 +4872,9 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
 {
 	if (intel_crtc->overlay) {
 		struct drm_device *dev = intel_crtc->base.dev;
-		struct drm_i915_private *dev_priv = to_i915(dev);
 
 		mutex_lock(&dev->struct_mutex);
-		dev_priv->mm.interruptible = false;
 		(void) intel_overlay_switch_off(intel_crtc->overlay);
-		dev_priv->mm.interruptible = true;
 		mutex_unlock(&dev->struct_mutex);
 	}
...