提交 3d574a6b 编写于 作者: C Chris Wilson

drm/i915: Remove walk over obj->vma_list for the shrinker

In the next patch, we want to reduce the lock coverage within the
shrinker, and one of the dangerous walks we have is over obj->vma_list.
We are only walking the obj->vma_list in order to check whether it has
been permanently pinned by HW access, typically via use on the scanout.
But we have a couple of other long term pins, the context objects for
which we currently have to check the individual vma pin_count. If we
instead mark these using obj->pin_display, we can forgo the dangerous
and sometimes slow list iteration.

v2: Rearrange code to try and avoid confusion from false associations
due to arrangement of whitespace along with rebasing on obj->pin_global.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-4-chris@chris-wilson.co.uk
上级 f46250e4
...@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock) ...@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
} }
/*
 * Return true if any vma bound to @obj holds a pin, i.e. the object is
 * "permanently" pinned by HW access (e.g. scanout) and its backing pages
 * must not be reclaimed by the shrinker.
 *
 * NOTE(review): relies on the invariant that GGTT vma are kept at the
 * head of obj->vma_list, so the walk can stop at the first ppGTT vma.
 * Caller is presumably holding struct_mutex to keep the list stable —
 * confirm at call sites.
 */
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
/* Only GGTT vma may be permanently pinned, and are always
 * at the start of the list. We can stop hunting as soon
 * as we see a ppGTT vma.
 */
if (!i915_vma_is_ggtt(vma))
break;
/* A pinned GGTT vma => the object cannot be shrunk. */
if (i915_vma_is_pinned(vma))
return true;
}
/* No GGTT vma was pinned; the pages are reclaimable. */
return false;
}
static bool swap_available(void) static bool swap_available(void)
{ {
return get_nr_swap_pages() > 0; return get_nr_swap_pages() > 0;
...@@ -115,7 +96,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -115,7 +96,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count) if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false; return false;
if (any_vma_pinned(obj)) /* If any vma are "permanently" pinned, it will prevent us from
* reclaiming the obj->mm.pages. We only allow scanout objects to claim
* a permanent pin, along with a few others like the context objects.
* To simplify the scan, and to avoid walking the list of vma under the
* object, we just check the count of its permanently pinned.
*/
if (obj->pin_global)
return false; return false;
/* We can only return physical pages to the system if we can either /* We can only return physical pages to the system if we can either
......
...@@ -1093,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine, ...@@ -1093,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
i915_ggtt_offset(ce->ring->vma); i915_ggtt_offset(ce->ring->vma);
ce->state->obj->mm.dirty = true; ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx); i915_gem_context_get(ctx);
out: out:
...@@ -1120,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine, ...@@ -1120,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
intel_ring_unpin(ce->ring); intel_ring_unpin(ce->ring);
ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj); i915_gem_object_unpin_map(ce->state->obj);
i915_vma_unpin(ce->state); i915_vma_unpin(ce->state);
......
...@@ -1244,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring, ...@@ -1244,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
if (IS_ERR(addr)) if (IS_ERR(addr))
goto err; goto err;
vma->obj->pin_global++;
ring->vaddr = addr; ring->vaddr = addr;
return 0; return 0;
...@@ -1275,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring) ...@@ -1275,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
i915_gem_object_unpin_map(ring->vma->obj); i915_gem_object_unpin_map(ring->vma->obj);
ring->vaddr = NULL; ring->vaddr = NULL;
ring->vma->obj->pin_global--;
i915_vma_unpin(ring->vma); i915_vma_unpin(ring->vma);
} }
...@@ -1439,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine, ...@@ -1439,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
goto err; goto err;
ce->state->obj->mm.dirty = true; ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
} }
/* The kernel context is only used as a placeholder for flushing the /* The kernel context is only used as a placeholder for flushing the
...@@ -1473,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine, ...@@ -1473,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
if (--ce->pin_count) if (--ce->pin_count)
return; return;
if (ce->state) if (ce->state) {
ce->state->obj->pin_global--;
i915_vma_unpin(ce->state); i915_vma_unpin(ce->state);
}
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册