Commit 8c45cec4 authored by Chris Wilson

drm/i915: Split vma exec_link/evict_link

Currently the vma has a single link member that is used both to hold its
place in the execbuf reservation list and to hold its place in any
eviction list. This dual use is tricky and error-prone.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170615081435.17699-3-chris@chris-wilson.co.uk
Parent: d55495b4
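The hazard being removed is easiest to see in isolation. Below is a minimal userspace sketch, not kernel code: the list_head work-alike and struct vma here are illustrative stand-ins. Calling list_add() on a link that is still a member of another list silently corrupts that first list, which is exactly the trap when one link member serves both the execbuf and eviction roles.

	#include <stdio.h>

	/* Userspace stand-ins for the kernel's circular-list primitives. */
	struct list_head { struct list_head *prev, *next; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

	static void list_add(struct list_head *n, struct list_head *head)
	{
		n->next = head->next;
		n->prev = head;
		head->next->prev = n;
		head->next = n;
	}

	struct vma { int id; struct list_head link; };	/* one link, two roles */

	int main(void)
	{
		struct list_head exec_list, evict_list;
		struct vma v = { .id = 1 };

		INIT_LIST_HEAD(&exec_list);
		INIT_LIST_HEAD(&evict_list);

		list_add(&v.link, &exec_list);	/* reserved for execbuf */
		list_add(&v.link, &evict_list);	/* reused for eviction: corruption */

		/* exec_list still points at v, but v's link now belongs to
		 * evict_list, so walking exec_list wanders into evict_list. */
		printf("exec_list broken: %d\n", exec_list.next->next != &exec_list);
		return 0;
	}

Compiled and run, this prints "exec_list broken: 1". Splitting the member into exec_link and evict_link, as the diff below does, removes this failure mode by construction.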
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -62,7 +62,7 @@ mark_free(struct drm_mm_scan *scan,
 	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
 		return false;
 
-	list_add(&vma->exec_list, unwind);
+	list_add(&vma->evict_link, unwind);
 	return drm_mm_scan_add_block(scan, &vma->node);
 }
@@ -154,7 +154,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	} while (*++phase);
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		ret = drm_mm_scan_remove_block(&scan, &vma->node);
 		BUG_ON(ret);
 	}
@@ -200,16 +200,16 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * calling unbind (which may remove the active reference
 	 * of any of our objects, thus corrupting the list).
	 */
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		if (drm_mm_scan_remove_block(&scan, &vma->node))
 			__i915_vma_pin(vma);
 		else
-			list_del(&vma->exec_list);
+			list_del(&vma->evict_link);
 	}
 
 	/* Unbinding will emit any required flushes */
 	ret = 0;
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
@@ -322,10 +322,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * reference) another in our eviction list.
 		 */
 		__i915_vma_pin(vma);
-		list_add(&vma->exec_list, &eviction_list);
+		list_add(&vma->evict_link, &eviction_list);
 	}
 
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
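The comments in the i915_gem_evict_something() and i915_gem_evict_for_node() hunks above describe a two-pass pattern: first pin every victim on the evict_link list, and only then unpin and unbind, so that unbinding one vma cannot drop the last reference to (and thereby unlink) another vma still on the list. A minimal userspace sketch of that control-flow shape; vma_pin(), vma_unpin() and vma_unbind() are hypothetical stand-ins for __i915_vma_pin(), __i915_vma_unpin() and i915_vma_unbind(), with pinning modelled as a plain counter:

	#include <stdio.h>

	struct vma { int pinned; int bound; };

	static void vma_pin(struct vma *v)   { v->pinned++; }
	static void vma_unpin(struct vma *v) { v->pinned--; }

	static int vma_unbind(struct vma *v)
	{
		if (v->pinned)
			return -16;	/* -EBUSY: someone still holds a pin */
		v->bound = 0;
		return 0;
	}

	int main(void)
	{
		struct vma victims[3] = { { .bound = 1 }, { .bound = 1 }, { .bound = 1 } };
		int i, ret = 0;

		/* Pass 1: pin every victim before any destructive work, so
		 * tearing one down cannot make another vanish from under us. */
		for (i = 0; i < 3; i++)
			vma_pin(&victims[i]);

		/* Pass 2: drop our own pin, then unbind; later victims are
		 * still pinned while earlier ones are being unbound, and the
		 * loop keeps unpinning even after a first error. */
		for (i = 0; i < 3; i++) {
			vma_unpin(&victims[i]);
			if (ret == 0)
				ret = vma_unbind(&victims[i]);
		}

		printf("ret=%d, still bound: %d%d%d\n", ret,
		       victims[0].bound, victims[1].bound, victims[2].bound);
		return 0;
	}

Running it prints "ret=0, still bound: 000"; note how the second loop mirrors the unpin-then-unbind loop in the diff.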
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -136,7 +136,7 @@ eb_reset(struct i915_execbuffer *eb)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		eb_unreserve_vma(vma);
 		i915_vma_put(vma);
 		vma->exec_entry = NULL;
@@ -149,7 +149,7 @@ eb_reset(struct i915_execbuffer *eb)
 static struct i915_vma *
 eb_get_batch(struct i915_execbuffer *eb)
 {
-	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);
 
 	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -226,7 +226,7 @@ eb_lookup_vmas(struct i915_execbuffer *eb)
 		}
 
 		/* Transfer ownership from the objects list to the vmas list. */
-		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_add_tail(&vma->exec_link, &eb->vmas);
 		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &eb->exec[i];
@@ -285,7 +285,7 @@ static void eb_destroy(struct i915_execbuffer *eb)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		if (!vma->exec_entry)
 			continue;
@@ -751,7 +751,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
 	struct i915_vma *vma;
 	int ret = 0;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		ret = eb_relocate_vma(vma, eb);
 		if (ret)
 			break;
@@ -904,7 +904,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
 		obj = vma->obj;
 		entry = vma->exec_entry;
@@ -920,12 +920,12 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (entry->flags & EXEC_OBJECT_PINNED)
-			list_move_tail(&vma->exec_list, &pinned_vmas);
+			list_move_tail(&vma->exec_link, &pinned_vmas);
 		else if (need_mappable) {
 			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
-			list_move(&vma->exec_list, &ordered_vmas);
+			list_move(&vma->exec_link, &ordered_vmas);
 		} else
-			list_move_tail(&vma->exec_list, &ordered_vmas);
+			list_move_tail(&vma->exec_link, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
@@ -950,7 +950,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(vma, &eb->vmas, exec_list) {
+		list_for_each_entry(vma, &eb->vmas, exec_link) {
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
@@ -963,7 +963,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(vma, &eb->vmas, exec_list) {
+		list_for_each_entry(vma, &eb->vmas, exec_link) {
 			if (drm_mm_node_allocated(&vma->node))
 				continue;
@@ -977,7 +977,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 			return ret;
 
 		/* Decrement pin count for bound objects */
-		list_for_each_entry(vma, &eb->vmas, exec_list)
+		list_for_each_entry(vma, &eb->vmas, exec_link)
 			eb_unreserve_vma(vma);
 
 		ret = i915_gem_evict_vm(eb->vm, true);
@@ -1066,7 +1066,7 @@ eb_relocate_slow(struct i915_execbuffer *eb)
 	if (ret)
 		goto err;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		int idx = vma->exec_entry - eb->exec;
 
 		ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
@@ -1092,7 +1092,7 @@ eb_move_to_gpu(struct i915_execbuffer *eb)
 	struct i915_vma *vma;
 	int ret;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
@@ -1314,7 +1314,7 @@ eb_move_to_active(struct i915_execbuffer *eb)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		obj->base.write_domain = obj->base.pending_write_domain;
@@ -1388,7 +1388,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 	memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	i915_gem_object_get(shadow_batch_obj);
-	list_add_tail(&vma->exec_list, &eb->vmas);
+	list_add_tail(&vma->exec_link, &eb->vmas);
 
 out:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
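Most hunks in this file are a mechanical s/exec_list/exec_link/, but the eb_reserve() hunk shows the link doing real work: entries flagged EXEC_OBJECT_PINNED are segregated onto pinned_vmas, entries that will need a mappable GTT slot are moved to the front of ordered_vmas, and everything else goes to its tail. A self-contained sketch of that classification, using userspace work-alikes of the list helpers; the struct vma fields here are illustrative (in the driver the flags live in the exec object entry and need_mappable is computed per entry):

	#include <stddef.h>
	#include <stdio.h>

	#define EXEC_OBJECT_PINNED	(1 << 0)

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct list_head { struct list_head *prev, *next; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

	static void __list_add(struct list_head *n,
			       struct list_head *prev, struct list_head *next)
	{
		next->prev = n;
		n->next = next;
		n->prev = prev;
		prev->next = n;
	}

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		__list_add(n, h->prev, h);
	}

	static void list_move(struct list_head *e, struct list_head *h)
	{
		list_del(e);
		__list_add(e, h, h->next);	/* to the front */
	}

	static void list_move_tail(struct list_head *e, struct list_head *h)
	{
		list_del(e);
		__list_add(e, h->prev, h);	/* to the back */
	}

	struct vma { int id, flags, need_mappable; struct list_head exec_link; };

	int main(void)
	{
		struct list_head vmas, ordered_vmas, pinned_vmas, *p;
		struct vma v[3] = {
			{ .id = 0 },
			{ .id = 1, .need_mappable = 1 },
			{ .id = 2, .flags = EXEC_OBJECT_PINNED },
		};
		int i;

		INIT_LIST_HEAD(&vmas);
		INIT_LIST_HEAD(&ordered_vmas);
		INIT_LIST_HEAD(&pinned_vmas);
		for (i = 0; i < 3; i++)
			list_add_tail(&v[i].exec_link, &vmas);

		/* Classify each vma, as the loop in eb_reserve() does
		 * via exec_link. */
		for (i = 0; i < 3; i++) {
			if (v[i].flags & EXEC_OBJECT_PINNED)
				list_move_tail(&v[i].exec_link, &pinned_vmas);
			else if (v[i].need_mappable)
				list_move(&v[i].exec_link, &ordered_vmas);
			else
				list_move_tail(&v[i].exec_link, &ordered_vmas);
		}

		for (p = ordered_vmas.next; p != &ordered_vmas; p = p->next)
			printf("ordered: vma %d\n",
			       container_of(p, struct vma, exec_link)->id);
		for (p = pinned_vmas.next; p != &pinned_vmas; p = p->next)
			printf("pinned (segregated): vma %d\n",
			       container_of(p, struct vma, exec_link)->id);
		return 0;
	}

This prints the mappable-needing vma 1 first, ordinary vma 0 next, and the user-pinned vma 2 on its own list: the same front/tail/segregate placement the three list_move calls in the hunk perform.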
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -100,8 +100,11 @@ struct i915_vma {
 	struct list_head obj_link; /* Link in the object's VMA list */
 	struct rb_node obj_node;
 
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
+	/** This vma's place in the execbuf reservation list */
+	struct list_head exec_link;
+
+	/** This vma's place in the eviction list */
+	struct list_head evict_link;
 
 	/**
	 * Used for performing relocations during execbuffer insertion.
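The header change is the heart of the patch: one shared exec_list becomes two independent links, so membership of the execbuf reservation list and of an eviction list no longer interfere, and a vma can legitimately sit on both at once. Revisiting the first sketch with the split applied (again a userspace work-alike, not the kernel's types):

	#include <stdio.h>

	struct list_head { struct list_head *prev, *next; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

	static void list_add(struct list_head *n, struct list_head *head)
	{
		n->next = head->next;
		n->prev = head;
		head->next->prev = n;
		head->next = n;
	}

	/* Two links, two roles: each list gets its own member. */
	struct vma {
		int id;
		struct list_head exec_link;	/* execbuf reservation list */
		struct list_head evict_link;	/* eviction list */
	};

	int main(void)
	{
		struct list_head exec_list, evict_list;
		struct vma v = { .id = 1 };

		INIT_LIST_HEAD(&exec_list);
		INIT_LIST_HEAD(&evict_list);

		list_add(&v.exec_link, &exec_list);
		list_add(&v.evict_link, &evict_list);

		/* Both lists stay well formed. */
		printf("exec ok: %d, evict ok: %d\n",
		       exec_list.next->next == &exec_list,
		       evict_list.next->next == &evict_list);
		return 0;
	}

Building and running this prints "exec ok: 1, evict ok: 1"; the corruption shown in the first sketch can no longer occur.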