提交 912d572d 编写于 作者: Chris Wilson 提交者: Linus Torvalds

drm/i915: wire up shrinkctl->nr_scanned

shrink_slab() allows us to report back the number of objects we
successfully scanned (out of the target shrinkctl->nr_to_scan).  As we
report the number of pages owned by each GEM object as a separate item
to the shrinker, we cannot precisely control the number of shrinker
objects we scan on each pass; and indeed may free more than requested.
If we fail to tell the shrinker about the number of objects we process,
it will continue to hold a grudge against us as any objects left
unscanned are added to the next reclaim -- and so we will keep on
"unfairly" shrinking our own slab in comparison to other slabs.

Link: http://lkml.kernel.org/r/20170822135325.9191-2-chris@chris-wilson.co.uk
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Shaohua Li <shli@fb.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 d460acb5
...@@ -4308,10 +4308,10 @@ i915_drop_caches_set(void *data, u64 val) ...@@ -4308,10 +4308,10 @@ i915_drop_caches_set(void *data, u64 val)
fs_reclaim_acquire(GFP_KERNEL); fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND) if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
if (val & DROP_UNBOUND) if (val & DROP_UNBOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
if (val & DROP_SHRINK_ALL) if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(dev_priv); i915_gem_shrink_all(dev_priv);
......
...@@ -3742,6 +3742,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv, ...@@ -3742,6 +3742,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
/* i915_gem_shrinker.c */ /* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target, unsigned long target,
unsigned long *nr_scanned,
unsigned flags); unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1 #define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2 #define I915_SHRINK_UNBOUND 0x2
......
...@@ -2354,7 +2354,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2354,7 +2354,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
goto err_sg; goto err_sg;
} }
i915_gem_shrink(dev_priv, 2 * page_count, *s++); i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
cond_resched(); cond_resched();
/* We've tried hard to allocate the memory by reaping /* We've tried hard to allocate the memory by reaping
...@@ -5015,7 +5015,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv) ...@@ -5015,7 +5015,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze() * the objects as well, see i915_gem_freeze()
*/ */
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND); i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(dev_priv); i915_gem_drain_freed_objects(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->drm.struct_mutex);
......
...@@ -2062,7 +2062,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, ...@@ -2062,7 +2062,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
*/ */
GEM_BUG_ON(obj->mm.pages == pages); GEM_BUG_ON(obj->mm.pages == pages);
} while (i915_gem_shrink(to_i915(obj->base.dev), } while (i915_gem_shrink(to_i915(obj->base.dev),
obj->base.size >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE)); I915_SHRINK_ACTIVE));
......
...@@ -136,6 +136,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) ...@@ -136,6 +136,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
* i915_gem_shrink - Shrink buffer object caches * i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device * @dev_priv: i915 device
* @target: amount of memory to make available, in pages * @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
* @flags: control flags for selecting cache types * @flags: control flags for selecting cache types
* *
* This function is the main interface to the shrinker. It will try to release * This function is the main interface to the shrinker. It will try to release
...@@ -158,7 +159,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) ...@@ -158,7 +159,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
*/ */
unsigned long unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv, i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target, unsigned flags) unsigned long target,
unsigned long *nr_scanned,
unsigned flags)
{ {
const struct { const struct {
struct list_head *list; struct list_head *list;
...@@ -169,6 +172,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -169,6 +172,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 }, { NULL, 0 },
}, *phase; }, *phase;
unsigned long count = 0; unsigned long count = 0;
unsigned long scanned = 0;
bool unlock; bool unlock;
if (!shrinker_lock(dev_priv, &unlock)) if (!shrinker_lock(dev_priv, &unlock))
...@@ -249,6 +253,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -249,6 +253,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
} }
mutex_unlock(&obj->mm.lock); mutex_unlock(&obj->mm.lock);
scanned += obj->base.size >> PAGE_SHIFT;
} }
} }
list_splice_tail(&still_in_list, phase->list); list_splice_tail(&still_in_list, phase->list);
...@@ -261,6 +266,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -261,6 +266,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
shrinker_unlock(dev_priv, unlock); shrinker_unlock(dev_priv, unlock);
if (nr_scanned)
*nr_scanned += scanned;
return count; return count;
} }
...@@ -283,7 +290,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) ...@@ -283,7 +290,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
unsigned long freed; unsigned long freed;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
freed = i915_gem_shrink(dev_priv, -1UL, freed = i915_gem_shrink(dev_priv, -1UL, NULL,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE); I915_SHRINK_ACTIVE);
...@@ -329,23 +336,28 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) ...@@ -329,23 +336,28 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
unsigned long freed; unsigned long freed;
bool unlock; bool unlock;
sc->nr_scanned = 0;
if (!shrinker_lock(dev_priv, &unlock)) if (!shrinker_lock(dev_priv, &unlock))
return SHRINK_STOP; return SHRINK_STOP;
freed = i915_gem_shrink(dev_priv, freed = i915_gem_shrink(dev_priv,
sc->nr_to_scan, sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE); I915_SHRINK_PURGEABLE);
if (freed < sc->nr_to_scan) if (freed < sc->nr_to_scan)
freed += i915_gem_shrink(dev_priv, freed += i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed, sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND); I915_SHRINK_UNBOUND);
if (freed < sc->nr_to_scan && current_is_kswapd()) { if (freed < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
freed += i915_gem_shrink(dev_priv, freed += i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed, sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE | I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND); I915_SHRINK_UNBOUND);
...@@ -354,7 +366,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) ...@@ -354,7 +366,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
shrinker_unlock(dev_priv, unlock); shrinker_unlock(dev_priv, unlock);
return freed; return sc->nr_scanned ? freed : SHRINK_STOP;
} }
static bool static bool
...@@ -453,7 +465,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr ...@@ -453,7 +465,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
goto out; goto out;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
freed_pages += i915_gem_shrink(dev_priv, -1UL, freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE | I915_SHRINK_ACTIVE |
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册