Commit caea7476 authored by Chris Wilson

drm/i915: More accurately track last fence usage by the GPU

Based on a patch by Daniel Vetter.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent: a7a09aeb
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -110,7 +110,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -118,6 +118,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.read_domains,
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
+		   obj->last_fenced_seqno,
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
...
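The debugfs hunk above adds the new last_fenced_seqno to the per-object
line, right after last_rendering_seqno. Purely as an illustration (every
value below is invented), a line printed by the new format string
"%p: %s%s %8zd %04x %04x %d %d%s%s" could look like:

    ffff88001d0a9000: pX     4096 0019 0000 124 122 dirty

i.e. object address, pin and tiling flags, size, read/write domains, the
last rendering seqno and, newly, the last fenced seqno, followed by the
dirty/purgeable flags.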
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -124,9 +124,8 @@ struct drm_i915_master_private {
 #define I915_FENCE_REG_NONE -1

 struct drm_i915_fence_reg {
-	struct drm_i915_gem_object *obj;
 	struct list_head lru_list;
-	bool gpu;
+	struct drm_i915_gem_object *obj;
 };

 struct sdvo_device_mapping {
@@ -787,6 +786,12 @@ struct drm_i915_gem_object {
 	unsigned int fault_mappable : 1;
 	unsigned int pin_mappable : 1;

+	/*
+	 * Is the GPU currently using a fence to access this buffer,
+	 */
+	unsigned int pending_fenced_gpu_access:1;
+	unsigned int fenced_gpu_access:1;
+
 	struct page **pages;

 	/**
@@ -802,11 +807,13 @@ struct drm_i915_gem_object {
 	 */
 	uint32_t gtt_offset;

-	/* Which ring is refering to is this object */
-	struct intel_ring_buffer *ring;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
+	struct intel_ring_buffer *ring;
+
+	/** Breadcrumb of last fenced GPU access to the buffer. */
+	uint32_t last_fenced_seqno;
+	struct intel_ring_buffer *last_fenced_ring;

 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
...
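The i915_drv.h hunk moves fence-usage tracking off the fence register
(the removed bool gpu) and onto the object itself. Below is a minimal
compilable sketch of that bookkeeping, not the driver code: the types
sketch_object and sketch_ring are invented stand-ins for the trimmed
driver structs, though the field names mirror the patch.

    /* Sketch only: per-object fence-usage tracking as introduced by
     * this patch.  sketch_* names are stand-ins, not driver types. */
    #include <stdint.h>

    struct sketch_ring;                 /* stands in for intel_ring_buffer */

    struct sketch_object {
            /* set while an execbuffer is being built, before submission */
            unsigned int pending_fenced_gpu_access : 1;
            /* committed at submission: this batch accesses the buffer
             * through a fence register */
            unsigned int fenced_gpu_access : 1;

            uint32_t last_rendering_seqno;  /* last rendering breadcrumb */
            uint32_t last_fenced_seqno;     /* last fenced-access breadcrumb */
            struct sketch_ring *ring;
            struct sketch_ring *last_fenced_ring;
    };

    /* On submission (cf. i915_gem_object_move_to_active): stamp the
     * fenced breadcrumb only when the fence was actually used. */
    static void sketch_move_to_active(struct sketch_object *obj,
                                      struct sketch_ring *ring,
                                      uint32_t seqno)
    {
            obj->ring = ring;
            obj->last_rendering_seqno = seqno;
            if (obj->fenced_gpu_access) {
                    obj->last_fenced_seqno = seqno;
                    obj->last_fenced_ring = ring;
            }
    }

Keeping a separate last_fenced_seqno means the driver can distinguish
"this object is still rendering" from "this object's fence is still in
use", which is exactly what the i915_gem.c hunks below exploit.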
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1688,7 +1688,27 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 	obj->last_rendering_seqno = seqno;
+
+	if (obj->fenced_gpu_access) {
+		struct drm_i915_fence_reg *reg;
+
+		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+		obj->last_fenced_seqno = seqno;
+		obj->last_fenced_ring = ring;
+
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	}
+}
+
+static void
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+{
+	list_del_init(&obj->ring_list);
+	obj->last_rendering_seqno = 0;
+	obj->last_fenced_seqno = 0;
 }

 static void
@@ -1699,8 +1719,33 @@ i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
 	BUG_ON(!obj->active);
 	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-	list_del_init(&obj->ring_list);
-	obj->last_rendering_seqno = 0;
+	i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (obj->pin_count != 0)
+		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+	else
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	BUG_ON(!list_empty(&obj->gpu_write_list));
+	BUG_ON(!obj->active);
+	obj->ring = NULL;
+
+	i915_gem_object_move_off_active(obj);
+	obj->fenced_gpu_access = false;
+	obj->last_fenced_ring = NULL;
+
+	obj->active = 0;
+	drm_gem_object_unreference(&obj->base);
+
+	WARN_ON(i915_verify_lists(dev));
 }

 /* Immediately discard the backing storage */
@@ -1729,35 +1774,11 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 	return obj->madv == I915_MADV_DONTNEED;
 }

-static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (obj->pin_count != 0)
-		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
-	else
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
-
-	list_del_init(&obj->ring_list);
-
-	BUG_ON(!list_empty(&obj->gpu_write_list));
-
-	obj->last_rendering_seqno = 0;
-	obj->ring = NULL;
-	if (obj->active) {
-		obj->active = 0;
-		drm_gem_object_unreference(&obj->base);
-	}
-	WARN_ON(i915_verify_lists(dev));
-}
-
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
 			       uint32_t flush_domains,
 			       struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;

 	list_for_each_entry_safe(obj, next,
@@ -1770,14 +1791,6 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			list_del_init(&obj->gpu_write_list);
 			i915_gem_object_move_to_active(obj, ring);

-			/* update the fence lru list */
-			if (obj->fence_reg != I915_FENCE_REG_NONE) {
-				struct drm_i915_fence_reg *reg =
-					&dev_priv->fence_regs[obj->fence_reg];
-				list_move_tail(&reg->lru_list,
-					       &dev_priv->mm.fence_list);
-			}
-
 			trace_i915_gem_object_change_domain(obj,
 							    obj->base.read_domains,
 							    old_write_domain);
@@ -2615,8 +2628,7 @@ i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
 			      bool interruptible)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg;
+	int ret;

 	if (obj->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
@@ -2631,19 +2643,23 @@ i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
 	 */
-	reg = &dev_priv->fence_regs[obj->fence_reg];
-	if (reg->gpu) {
-		int ret;
-
+	if (obj->fenced_gpu_access) {
 		ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
 		if (ret)
 			return ret;

-		ret = i915_gem_object_wait_rendering(obj, interruptible);
+		obj->fenced_gpu_access = false;
+	}
+
+	if (obj->last_fenced_seqno) {
+		ret = i915_do_wait_request(dev,
+					   obj->last_fenced_seqno,
+					   interruptible,
+					   obj->last_fenced_ring);
 		if (ret)
 			return ret;

-		reg->gpu = false;
+		obj->last_fenced_seqno = false;
 	}

 	i915_gem_object_flush_gtt_write_domain(obj);
@@ -3166,8 +3182,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->base.write_domain &&
-	    (obj->base.write_domain != obj->base.pending_read_domains ||
-	     obj->ring != ring)) {
+	    (((obj->base.write_domain != obj->base.pending_read_domains ||
+	       obj->ring != ring)) ||
+	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
 		flush_domains |= obj->base.write_domain;
 		invalidate_domains |=
 			obj->base.pending_read_domains & ~obj->base.write_domain;
@@ -3528,7 +3545,6 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
 			    struct drm_i915_gem_exec_object2 *exec_list,
 			    int count)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i, retry;

 	/* Attempt to pin all of the buffers into the GTT.
@@ -3601,7 +3617,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
 				if (ret)
 					break;

-				dev_priv->fence_regs[obj->fence_reg].gpu = true;
+				obj->pending_fenced_gpu_access = true;
 			}

 			entry->offset = obj->gtt_offset;
@@ -3981,6 +3997,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 		obj->in_execbuffer = true;
+		obj->pending_fenced_gpu_access = false;
 	}

 	/* Move the objects en-masse into the GTT, evicting if necessary. */
@@ -4085,6 +4102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
+		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

 		i915_gem_object_move_to_active(obj, ring);
 		if (obj->base.write_domain) {
...
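Taken together, the i915_gem.c hunks implement a small state machine
around the two new bits. The sketch below traces that lifecycle under
the same stand-in types as the earlier sketch; wait_request() is a
hypothetical helper standing in for i915_do_wait_request(), not a real
driver function.

    /* Sketch of the lifecycle this patch sets up (stand-in names). */
    #include <stdbool.h>
    #include <stdint.h>

    struct sketch_ring;

    struct sketch_object {
            unsigned int pending_fenced_gpu_access : 1;
            unsigned int fenced_gpu_access : 1;
            uint32_t last_fenced_seqno;
            struct sketch_ring *last_fenced_ring;
    };

    /* hypothetical stand-in for i915_do_wait_request() */
    int wait_request(struct sketch_ring *ring, uint32_t seqno,
                     bool interruptible);

    /* 1. execbuffer entry: nothing pending yet */
    static void sketch_execbuffer_begin(struct sketch_object *obj)
    {
            obj->pending_fenced_gpu_access = false;
    }

    /* 2. reservation: a fence register was set up for this batch */
    static void sketch_reserve_fence(struct sketch_object *obj)
    {
            obj->pending_fenced_gpu_access = true;
    }

    /* 3. submission: commit the pending bit */
    static void sketch_submit(struct sketch_object *obj)
    {
            obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
    }

    /* 4. releasing the fence register (cf. i915_gem_object_put_fence_reg):
     * wait only for the last request that actually used the fence,
     * instead of waiting for all outstanding rendering. */
    static int sketch_put_fence(struct sketch_object *obj, bool interruptible)
    {
            obj->fenced_gpu_access = false;

            if (obj->last_fenced_seqno) {
                    int ret = wait_request(obj->last_fenced_ring,
                                           obj->last_fenced_seqno,
                                           interruptible);
                    if (ret)
                            return ret;
                    obj->last_fenced_seqno = 0;
            }
            return 0;
    }

The payoff is in step 4: an object may still be busy rendering while its
fence register is recycled, so long as the fenced access itself has
completed, which is the "more accurate" tracking the commit title names.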