Commit e2d05a8b authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: Convert active API to VMA

Even though we track activity per object and not per VMA, the
active_list is based on the VM, so it makes the most sense to use VMAs
in the APIs.

NOTE: Daniel intends to eventually rip out active/inactive LRUs, but for
now, leave them be.

v2: Remove leftover hunk from the previous patch which didn't keep
i915_gem_object_move_to_active. That patch had to rely on the ring to
get the dev instead of the obj. (Chris)
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 5c2abbea
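
For orientation before the hunks: the whole conversion collapses an open-coded, two-step pattern at each call site into a single VMA-based call. A condensed before/after sketch, using only names that appear in the diff below:

	/* Before: each caller moved the VMA onto its VM's active list by hand,
	 * then did the object-level ring/seqno bookkeeping separately.
	 */
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	i915_gem_object_move_to_active(obj, ring);

	/* After: one helper does both; the object-level routine becomes
	 * static and is only reachable through the VMA wrapper.
	 */
	i915_vma_move_to_active(vma, ring);

This works because an i915_vma ties exactly one object (vma->obj) to one address space (vma->vm), so the VMA is the natural handle for anything that has to touch a per-VM list such as active_list.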
drivers/gpu/drm/i915/i915_drv.h
@@ -1905,9 +1905,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
drivers/gpu/drm/i915/i915_gem.c
@@ -1917,7 +1917,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }

-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
 {
@@ -1956,6 +1956,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 }

+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
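A note on the new helper's shape: list_move_tail() is the per-VM half of the work, keeping vm->active_list in LRU order, while the delegated i915_gem_object_move_to_active(), now static, handles the per-object ring and seqno state. Returning the void call from a void function is a GNU C construct the kernel's build flags accept; it reads as a tail call.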
drivers/gpu/drm/i915/i915_gem_context.c
@@ -453,11 +453,8 @@ static int do_switch(struct i915_hw_context *to)
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
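Spelled out, the single new line in do_switch() expands to the four it replaces (a sketch for illustration; i915_gem_obj_to_ggtt() is the existing lookup of an object's VMA in the global GTT, i.e. in dev_priv->gtt.base):

	struct i915_vma *vma = i915_gem_obj_to_ggtt(from->obj);

	/* vma->vm is the global GTT here, so this is the old
	 * ggtt->active_list move plus the object-level bookkeeping.
	 */
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	i915_gem_object_move_to_active(from->obj, ring);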
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -872,8 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

-		list_move_tail(&vma->mm_list, &vma->vm->active_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
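In the execbuffer path the helper is a pure drop-in: i915_gem_execbuffer_move_to_active() already iterates VMAs, and each vma carries both the object (vma->obj) and the address space (vma->vm) that the removed two-line pattern needed, so nothing else in the loop changes.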