diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 891247d7929959d65a8e064007cd120610042cb4..204a2d9288aea91075e9f7411a4542b5473c328e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -62,7 +62,7 @@ mark_free(struct drm_mm_scan *scan,
 	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
 		return false;
 
-	list_add(&vma->exec_list, unwind);
+	list_add(&vma->evict_link, unwind);
 	return drm_mm_scan_add_block(scan, &vma->node);
 }
 
@@ -154,7 +154,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	} while (*++phase);
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		ret = drm_mm_scan_remove_block(&scan, &vma->node);
 		BUG_ON(ret);
 	}
@@ -200,16 +200,16 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * calling unbind (which may remove the active reference
 	 * of any of our objects, thus corrupting the list).
 	 */
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		if (drm_mm_scan_remove_block(&scan, &vma->node))
 			__i915_vma_pin(vma);
 		else
-			list_del(&vma->exec_list);
+			list_del(&vma->evict_link);
 	}
 
 	/* Unbinding will emit any required flushes */
 	ret = 0;
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
@@ -322,10 +322,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * reference) another in our eviction list.
 		 */
 		__i915_vma_pin(vma);
-		list_add(&vma->exec_list, &eviction_list);
+		list_add(&vma->evict_link, &eviction_list);
 	}
 
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a7aa21dcc5534f30559ed945630ed1c622201ad2..96705171e397206c02580fe8708619fbd8e80212 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -136,7 +136,7 @@ eb_reset(struct i915_execbuffer *eb)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		eb_unreserve_vma(vma);
 		i915_vma_put(vma);
 		vma->exec_entry = NULL;
@@ -149,7 +149,7 @@ eb_reset(struct i915_execbuffer *eb)
 static struct i915_vma *
 eb_get_batch(struct i915_execbuffer *eb)
 {
-	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);
 
 	/*
 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -226,7 +226,7 @@ eb_lookup_vmas(struct i915_execbuffer *eb)
 		}
 
 		/* Transfer ownership from the objects list to the vmas list. */
-		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_add_tail(&vma->exec_link, &eb->vmas);
 		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &eb->exec[i];
@@ -285,7 +285,7 @@ static void eb_destroy(struct i915_execbuffer *eb)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		if (!vma->exec_entry)
 			continue;
 
@@ -751,7 +751,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
 	struct i915_vma *vma;
 	int ret = 0;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		ret = eb_relocate_vma(vma, eb);
 		if (ret)
 			break;
@@ -904,7 +904,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
@@ -920,12 +920,12 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (entry->flags & EXEC_OBJECT_PINNED)
-			list_move_tail(&vma->exec_list, &pinned_vmas);
+			list_move_tail(&vma->exec_link, &pinned_vmas);
 		else if (need_mappable) {
 			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
-			list_move(&vma->exec_list, &ordered_vmas);
+			list_move(&vma->exec_link, &ordered_vmas);
 		} else
-			list_move_tail(&vma->exec_list, &ordered_vmas);
+			list_move_tail(&vma->exec_link, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
@@ -950,7 +950,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(vma, &eb->vmas, exec_list) {
+		list_for_each_entry(vma, &eb->vmas, exec_link) {
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
@@ -963,7 +963,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(vma, &eb->vmas, exec_list) {
+		list_for_each_entry(vma, &eb->vmas, exec_link) {
 			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
@@ -977,7 +977,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 			return ret;
 
 		/* Decrement pin count for bound objects */
-		list_for_each_entry(vma, &eb->vmas, exec_list)
+		list_for_each_entry(vma, &eb->vmas, exec_link)
 			eb_unreserve_vma(vma);
 
 		ret = i915_gem_evict_vm(eb->vm, true);
@@ -1066,7 +1066,7 @@ eb_relocate_slow(struct i915_execbuffer *eb)
 	if (ret)
 		goto err;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		int idx = vma->exec_entry - eb->exec;
 
 		ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
@@ -1092,7 +1092,7 @@ eb_move_to_gpu(struct i915_execbuffer *eb)
 	struct i915_vma *vma;
 	int ret;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
@@ -1314,7 +1314,7 @@ eb_move_to_active(struct i915_execbuffer *eb)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
+	list_for_each_entry(vma, &eb->vmas, exec_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		obj->base.write_domain = obj->base.pending_write_domain;
@@ -1388,7 +1388,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 		memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	i915_gem_object_get(shadow_batch_obj);
-	list_add_tail(&vma->exec_list, &eb->vmas);
+	list_add_tail(&vma->exec_link, &eb->vmas);
 
 out:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 2e03f81dddbe37ea3d63a77dfd93ca186265c2ea..4d827300d1a8f7f9807b2b8def235a674263aefa 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -100,8 +100,11 @@ struct i915_vma {
 	struct list_head obj_link; /* Link in the object's VMA list */
 	struct rb_node obj_node;
 
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
+	/** This vma's place in the execbuf reservation list */
+	struct list_head exec_link;
+
+	/** This vma's place in the eviction list */
+	struct list_head evict_link;
 
 	/**
 	 * Used for performing relocations during execbuffer insertion.
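---

A note on the rationale: an embedded struct list_head can only be threaded
onto one list at a time, because list_add() rewires the node's prev/next
pointers in place. Before this split, a vma reused a single exec_list link
for both its place in the execbuf reservation list and its place on an
eviction list, so the two users could not safely overlap. Below is a
minimal, self-contained userspace sketch of that invariant; the list
helpers and the struct are simplified stand-ins written for illustration,
not the driver's or the kernel's actual code.

/* Illustrative only: one embedded link is needed per list membership. */
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void list_init(struct list_head *h)
{
	h->prev = h->next = h;
}

/* Simplified stand-in for the kernel's list_add_tail(): splices the
 * node in before the head, overwriting the node's prev/next pointers.
 */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Stand-in for struct i915_vma: one link per list it can sit on. */
struct vma {
	int id;
	struct list_head exec_link;	/* place on the execbuf list */
	struct list_head evict_link;	/* place on the eviction list */
};

int main(void)
{
	struct list_head exec_list, eviction_list;
	struct vma v = { .id = 1 };

	list_init(&exec_list);
	list_init(&eviction_list);

	/* With dedicated links the same vma sits on both lists at once.
	 * If both calls shared one link, the second add would silently
	 * rewire the node out of the first list, corrupting it.
	 */
	list_add_tail(&v.exec_link, &exec_list);
	list_add_tail(&v.evict_link, &eviction_list);

	printf("vma %d is on both lists\n", v.id);
	return 0;
}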