Commit 666796da authored by Tvrtko Ursulin

drm/i915: More intel_engine_cs renaming

Some trivial ones, first pass done with Coccinelle:

@@
@@
(
- I915_NUM_RINGS
+ I915_NUM_ENGINES
|
- intel_ring_flag
+ intel_engine_flag
|
- for_each_ring
+ for_each_engine
|
- i915_gem_request_get_ring
+ i915_gem_request_get_engine
|
- intel_ring_idle
+ intel_engine_idle
|
- i915_gem_reset_ring_status
+ i915_gem_reset_engine_status
|
- i915_gem_reset_ring_cleanup
+ i915_gem_reset_engine_cleanup
|
- init_ring_lists
+ init_engine_lists
)
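
For reference, rules like these are applied with spatch; a minimal sketch, assuming the rules above are saved as engine-rename.cocci and run from the i915 source directory (the file name and invocation are illustrative, not recorded in the commit):

# Hypothetical invocation; the .cocci file name is an assumption.
spatch --sp-file engine-rename.cocci --in-place --dir .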

But that didn't fully work so I cleaned it up with:

for f in *.[hc]; do sed -i -e s/I915_NUM_RINGS/I915_NUM_ENGINES/ $f; done
for f in *.[hc]; do sed -i -e s/i915_gem_request_get_ring/i915_gem_request_get_engine/ $f; done
for f in *.[hc]; do sed -i -e s/intel_ring_flag/intel_engine_flag/ $f; done
for f in *.[hc]; do sed -i -e s/intel_ring_idle/intel_engine_idle/ $f; done
for f in *.[hc]; do sed -i -e s/init_ring_lists/init_engine_lists/ $f; done
for f in *.[hc]; do sed -i -e s/i915_gem_reset_ring_cleanup/i915_gem_reset_engine_cleanup/ $f; done
for f in *.[hc]; do sed -i -e s/i915_gem_reset_ring_status/i915_gem_reset_engine_status/ $f; done
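
To confirm that neither pass left stale identifiers behind, grepping the same files for the old names is a quick sanity check (hypothetical, not part of the commit):

# Hypothetical check; prints any remaining uses of the old names.
for sym in I915_NUM_RINGS intel_ring_flag for_each_ring intel_ring_idle; do grep -n "$sym" *.[hc]; done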

v2: Rebase.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent 4a570db5
@@ -143,7 +143,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 obj->base.size / 1024,
 obj->base.read_domains,
 obj->base.write_domain);
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 seq_printf(m, "%x ",
 i915_gem_request_get_seqno(obj->last_read_req[i]));
 seq_printf(m, "] %x %x%s%s%s",
@@ -184,7 +184,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 }
 if (obj->last_write_req != NULL)
 seq_printf(m, " (%s)",
-i915_gem_request_get_ring(obj->last_write_req)->name);
+i915_gem_request_get_engine(obj->last_write_req)->name);
 if (obj->frontbuffer_bits)
 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -402,7 +402,7 @@ static void print_batch_pool_stats(struct seq_file *m,
 memset(&stats, 0, sizeof(stats));
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 list_for_each_entry(obj,
 &engine->batch_pool.cache_list[j],
@@ -591,7 +591,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 pipe, plane);
 }
 if (work->flip_queued_req) {
-struct intel_engine_cs *engine = i915_gem_request_get_ring(work->flip_queued_req);
+struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
 seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
 engine->name,
@@ -644,7 +644,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 if (ret)
 return ret;
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 int count;
@@ -689,7 +689,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 return ret;
 any = 0;
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 int count;
 count = 0;
@@ -746,7 +746,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 return ret;
 intel_runtime_pm_get(dev_priv);
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 i915_ring_seqno_info(m, engine);
 intel_runtime_pm_put(dev_priv);
@@ -933,7 +933,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 seq_printf(m, "Graphics Interrupt mask: %08x\n",
 I915_READ(GTIMR));
 }
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 if (INTEL_INFO(dev)->gen >= 6) {
 seq_printf(m,
 "Graphics Interrupt mask (%s): %08x\n",
@@ -1331,8 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 struct drm_device *dev = node->minor->dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct intel_engine_cs *engine;
-u64 acthd[I915_NUM_RINGS];
-u32 seqno[I915_NUM_RINGS];
+u64 acthd[I915_NUM_ENGINES];
+u32 seqno[I915_NUM_ENGINES];
 u32 instdone[I915_NUM_INSTDONE_REG];
 int i, j;
@@ -1343,7 +1343,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 intel_runtime_pm_get(dev_priv);
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 seqno[i] = engine->get_seqno(engine, false);
 acthd[i] = intel_ring_get_active_head(engine);
 }
@@ -1359,7 +1359,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 } else
 seq_printf(m, "Hangcheck inactive\n");
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 seq_printf(m, "%s:\n", engine->name);
 seq_printf(m, "\tseqno = %x [current %x]\n",
 engine->hangcheck.seqno, seqno[i]);
@@ -1965,7 +1965,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 if (i915.enable_execlists) {
 seq_putc(m, '\n');
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 struct drm_i915_gem_object *ctx_obj =
 ctx->engine[i].state;
 struct intel_ringbuffer *ringbuf =
@@ -2055,7 +2055,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 list_for_each_entry(ctx, &dev_priv->context_list, link)
 if (ctx != dev_priv->kernel_context)
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 i915_dump_lrc_obj(m, ctx, engine);
 mutex_unlock(&dev->struct_mutex);
@@ -2089,7 +2089,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 intel_runtime_pm_get(dev_priv);
-for_each_ring(engine, dev_priv, ring_id) {
+for_each_engine(engine, dev_priv, ring_id) {
 struct drm_i915_gem_request *head_req = NULL;
 int count = 0;
 unsigned long flags;
@@ -2253,7 +2253,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 if (!ppgtt)
 return;
-for_each_ring(engine, dev_priv, unused) {
+for_each_engine(engine, dev_priv, unused) {
 seq_printf(m, "%s\n", engine->name);
 for (i = 0; i < 4; i++) {
 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2273,7 +2273,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 if (INTEL_INFO(dev)->gen == 6)
 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 seq_printf(m, "%s\n", engine->name);
 if (INTEL_INFO(dev)->gen == 7)
 seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2342,7 +2342,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
 int count = 0;
 int i;
-for_each_ring(engine, i915, i)
+for_each_engine(engine, i915, i)
 count += engine->irq_refcount;
 return count;
@@ -2466,7 +2466,7 @@ static void i915_guc_client_info(struct seq_file *m,
 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 seq_printf(m, "\tLast submission result: %d\n", client->retcode);
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 seq_printf(m, "\tSubmissions: %llu %s\n",
 client->submissions[engine->guc_id],
 engine->name);
@@ -2506,7 +2506,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 seq_printf(m, "\nGuC submissions:\n");
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
 engine->name, guc.submissions[engine->guc_id],
 guc.last_seqno[engine->guc_id]);
@@ -3153,14 +3153,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 seqno = (uint64_t *)kmap_atomic(page);
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 uint64_t offset;
 seq_printf(m, "%s\n", engine->name);
 seq_puts(m, " Last signal:");
 for (j = 0; j < num_rings; j++) {
-offset = i * I915_NUM_RINGS + j;
+offset = i * I915_NUM_ENGINES + j;
 seq_printf(m, "0x%08llx (0x%02llx) ",
 seqno[offset], offset * 8);
 }
@@ -3168,7 +3168,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 seq_puts(m, " Last wait: ");
 for (j = 0; j < num_rings; j++) {
-offset = i + (j * I915_NUM_RINGS);
+offset = i + (j * I915_NUM_ENGINES);
 seq_printf(m, "0x%08llx (0x%02llx) ",
 seqno[offset], offset * 8);
 }
@@ -3178,7 +3178,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 kunmap_atomic(seqno);
 } else {
 seq_puts(m, " Last signal:");
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 for (j = 0; j < num_rings; j++)
 seq_printf(m, "0x%08x\n",
 I915_READ(engine->semaphore.mbox.signal[j]));
@@ -3186,7 +3186,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 }
 seq_puts(m, "\nSync seqno:\n");
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 for (j = 0; j < num_rings; j++) {
 seq_printf(m, " 0x%08x ",
 engine->semaphore.sync_seqno[j]);
@@ -3244,7 +3244,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 intel_runtime_pm_get(dev_priv);
 seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 seq_printf(m, "HW whitelist count for %s: %d\n",
 engine->name, workarounds->hw_whitelist_count[i]);
 for (i = 0; i < workarounds->count; ++i) {
...
@@ -459,7 +459,7 @@ struct drm_i915_error_state {
 u32 cpu_ring_head;
 u32 cpu_ring_tail;
-u32 semaphore_seqno[I915_NUM_RINGS - 1];
+u32 semaphore_seqno[I915_NUM_ENGINES - 1];
 /* Register state */
 u32 start;
@@ -479,7 +479,7 @@ struct drm_i915_error_state {
 u32 fault_reg;
 u64 faddr;
 u32 rc_psmi; /* sleep state */
-u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
 struct drm_i915_error_object {
 int page_count;
@@ -505,12 +505,12 @@ struct drm_i915_error_state {
 pid_t pid;
 char comm[TASK_COMM_LEN];
-} ring[I915_NUM_RINGS];
+} ring[I915_NUM_ENGINES];
 struct drm_i915_error_buffer {
 u32 size;
 u32 name;
-u32 rseqno[I915_NUM_RINGS], wseqno;
+u32 rseqno[I915_NUM_ENGINES], wseqno;
 u64 gtt_offset;
 u32 read_domains;
 u32 write_domain;
@@ -824,7 +824,7 @@ struct intel_context {
 struct i915_vma *lrc_vma;
 u64 lrc_desc;
 uint32_t *lrc_reg_state;
-} engine[I915_NUM_RINGS];
+} engine[I915_NUM_ENGINES];
 struct list_head link;
 };
@@ -1639,7 +1639,7 @@ struct i915_wa_reg {
 struct i915_workarounds {
 struct i915_wa_reg reg[I915_MAX_WA_REGS];
 u32 count;
-u32 hw_whitelist_count[I915_NUM_RINGS];
+u32 hw_whitelist_count[I915_NUM_ENGINES];
 };
 struct i915_virtual_gpu {
@@ -1704,7 +1704,7 @@ struct drm_i915_private {
 wait_queue_head_t gmbus_wait_queue;
 struct pci_dev *bridge_dev;
-struct intel_engine_cs engine[I915_NUM_RINGS];
+struct intel_engine_cs engine[I915_NUM_ENGINES];
 struct drm_i915_gem_object *semaphore_obj;
 uint32_t last_seqno, next_seqno;
@@ -1967,8 +1967,8 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 }
 /* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
-for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+#define for_each_engine(ring__, dev_priv__, i__) \
+for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
 for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_ring_initialized((ring__))))
 enum hdmi_force_audio {
@@ -2039,7 +2039,7 @@ struct drm_i915_gem_object {
 struct drm_mm_node *stolen;
 struct list_head global_list;
-struct list_head ring_list[I915_NUM_RINGS];
+struct list_head ring_list[I915_NUM_ENGINES];
 /** Used in execbuf to temporarily hold a ref */
 struct list_head obj_exec_link;
@@ -2050,7 +2050,7 @@ struct drm_i915_gem_object {
 * rendering and so a non-zero seqno), and is not set if it is on
 * inactive (ready to be unbound) list.
 */
-unsigned int active:I915_NUM_RINGS;
+unsigned int active:I915_NUM_ENGINES;
 /**
 * This is set if the object has been written to since last bound
@@ -2129,7 +2129,7 @@ struct drm_i915_gem_object {
 * read request. This allows for the CPU to read from an active
 * buffer by only waiting for the write to complete.
 * */
-struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
+struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
 struct drm_i915_gem_request *last_write_req;
 /** Breadcrumb of last fenced GPU access to the buffer. */
 struct drm_i915_gem_request *last_fenced_req;
@@ -2277,7 +2277,7 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 }
 static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 {
 return req ? req->engine : NULL;
 }
...
@@ -1243,11 +1243,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 s64 *timeout,
 struct intel_rps_client *rps)
 {
-struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
+struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 struct drm_device *dev = engine->dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 const bool irq_test_in_progress =
-ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
+ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 DEFINE_WAIT(wait);
 unsigned long timeout_expire;
@@ -1512,7 +1512,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 i915_gem_object_retire__write(obj);
 }
 } else {
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 if (obj->last_read_req[i] == NULL)
 continue;
@@ -1552,7 +1552,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 {
 struct drm_device *dev = obj->base.dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_request *requests[I915_NUM_RINGS];
+struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 unsigned reset_counter;
 int ret, i, n = 0;
@@ -1577,7 +1577,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 requests[n++] = i915_gem_request_reference(req);
 } else {
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 struct drm_i915_gem_request *req;
 req = obj->last_read_req[i];
@@ -2406,12 +2406,12 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 struct drm_i915_gem_object *obj = vma->obj;
 struct intel_engine_cs *engine;
-engine = i915_gem_request_get_ring(req);
+engine = i915_gem_request_get_engine(req);
 /* Add a reference if we're newly entering the active list. */
 if (obj->active == 0)
 drm_gem_object_reference(&obj->base);
-obj->active |= intel_ring_flag(engine);
+obj->active |= intel_engine_flag(engine);
 list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
@@ -2423,7 +2423,7 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
 RQ_BUG_ON(obj->last_write_req == NULL);
-RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
+RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
 i915_gem_request_assign(&obj->last_write_req, NULL);
 intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2471,15 +2471,15 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 int ret, i, j;
 /* Carefully retire all requests without writing to the rings */
-for_each_ring(engine, dev_priv, i) {
-ret = intel_ring_idle(engine);
+for_each_engine(engine, dev_priv, i) {
+ret = intel_engine_idle(engine);
 if (ret)
 return ret;
 }
 i915_gem_retire_requests(dev);
 /* Finally reset hw state */
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 intel_ring_init_seqno(engine, seqno);
 for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
@@ -2801,7 +2801,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 return NULL;
 }
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
 struct intel_engine_cs *engine)
 {
 struct drm_i915_gem_request *request;
@@ -2820,7 +2820,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 i915_set_reset_status(dev_priv, request->ctx, false);
 }
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 struct intel_engine_cs *engine)
 {
 struct intel_ringbuffer *buffer;
@@ -2893,11 +2893,11 @@ void i915_gem_reset(struct drm_device *dev)
 * them for finding the guilty party. As the requests only borrow
 * their reference to the objects, the inspection must be done first.
 */
-for_each_ring(engine, dev_priv, i)
-i915_gem_reset_ring_status(dev_priv, engine);
-for_each_ring(engine, dev_priv, i)
-i915_gem_reset_ring_cleanup(dev_priv, engine);
+for_each_engine(engine, dev_priv, i)
+i915_gem_reset_engine_status(dev_priv, engine);
+for_each_engine(engine, dev_priv, i)
+i915_gem_reset_engine_cleanup(dev_priv, engine);
 i915_gem_context_reset(dev);
@@ -2966,7 +2966,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 bool idle = true;
 int i;
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 i915_gem_retire_requests_ring(engine);
 idle &= list_empty(&engine->request_list);
 if (i915.enable_execlists) {
@@ -3014,7 +3014,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 struct intel_engine_cs *ring;
 int i;
-for_each_ring(ring, dev_priv, i)
+for_each_engine(ring, dev_priv, i)
 if (!list_empty(&ring->request_list))
 return;
@@ -3028,7 +3028,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 struct intel_engine_cs *engine;
 int i;
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 i915_gem_batch_pool_fini(&engine->batch_pool);
 mutex_unlock(&dev->struct_mutex);
@@ -3048,7 +3048,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 if (!obj->active)
 return 0;
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 struct drm_i915_gem_request *req;
 req = obj->last_read_req[i];
@@ -3096,7 +3096,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct drm_i915_gem_wait *args = data;
 struct drm_i915_gem_object *obj;
-struct drm_i915_gem_request *req[I915_NUM_RINGS];
+struct drm_i915_gem_request *req[I915_NUM_ENGINES];
 unsigned reset_counter;
 int i, n = 0;
 int ret;
@@ -3133,7 +3133,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 drm_gem_object_unreference(&obj->base);
 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 if (obj->last_read_req[i] == NULL)
 continue;
@@ -3166,7 +3166,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 struct intel_engine_cs *from;
 int ret;
-from = i915_gem_request_get_ring(from_req);
+from = i915_gem_request_get_engine(from_req);
 if (to == from)
 return 0;
@@ -3260,7 +3260,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_request **to_req)
 {
 const bool readonly = obj->base.pending_write_domain == 0;
-struct drm_i915_gem_request *req[I915_NUM_RINGS];
+struct drm_i915_gem_request *req[I915_NUM_ENGINES];
 int ret, i, n;
 if (!obj->active)
@@ -3274,7 +3274,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 if (obj->last_write_req)
 req[n++] = obj->last_write_req;
 } else {
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_ENGINES; i++)
 if (obj->last_read_req[i])
 req[n++] = obj->last_read_req[i];
 }
@@ -3395,7 +3395,7 @@ int i915_gpu_idle(struct drm_device *dev)
 int ret, i;
 /* Flush everything onto the inactive list. */
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 if (!i915.enable_execlists) {
 struct drm_i915_gem_request *req;
@@ -3412,7 +3412,7 @@ int i915_gpu_idle(struct drm_device *dev)
 i915_add_request_no_flush(req);
 }
-ret = intel_ring_idle(engine);
+ret = intel_engine_idle(engine);
 if (ret)
 return ret;
 }
@@ -4359,7 +4359,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 if (obj->active) {
 int i;
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 struct drm_i915_gem_request *req;
 req = obj->last_read_req[i];
@@ -4447,7 +4447,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 int i;
 INIT_LIST_HEAD(&obj->global_list);
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_ENGINES; i++)
 INIT_LIST_HEAD(&obj->ring_list[i]);
 INIT_LIST_HEAD(&obj->obj_exec_link);
 INIT_LIST_HEAD(&obj->vma_list);
@@ -4659,7 +4659,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
 struct intel_engine_cs *engine;
 int i;
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 dev_priv->gt.stop_ring(engine);
 }
@@ -4876,7 +4876,7 @@ i915_gem_init_hw(struct drm_device *dev)
 }
 /* Need to do basic initialisation of all rings first: */
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 ret = engine->init_hw(engine);
 if (ret)
 goto out;
@@ -4901,7 +4901,7 @@ i915_gem_init_hw(struct drm_device *dev)
 goto out;
 /* Now it is safe to go back round and do everything else: */
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 struct drm_i915_gem_request *req;
 req = i915_gem_request_alloc(engine, NULL);
@@ -5009,7 +5009,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 struct intel_engine_cs *engine;
 int i;
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 dev_priv->gt.cleanup_ring(engine);
 if (i915.enable_execlists)
@@ -5022,7 +5022,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 }
 static void
-init_ring_lists(struct intel_engine_cs *engine)
+init_engine_lists(struct intel_engine_cs *engine)
 {
 INIT_LIST_HEAD(&engine->active_list);
 INIT_LIST_HEAD(&engine->request_list);
@@ -5055,8 +5055,8 @@ i915_gem_load_init(struct drm_device *dev)
 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-for (i = 0; i < I915_NUM_RINGS; i++)
-init_ring_lists(&dev_priv->engine[i]);
+for (i = 0; i < I915_NUM_ENGINES; i++)
+init_engine_lists(&dev_priv->engine[i]);
 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
...
@@ -345,7 +345,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 intel_lr_context_reset(dev, ctx);
 }
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 struct intel_engine_cs *engine = &dev_priv->engine[i];
 if (engine->last_context) {
@@ -426,7 +426,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 }
-for (i = I915_NUM_RINGS; --i >= 0;) {
+for (i = I915_NUM_ENGINES; --i >= 0;) {
 struct intel_engine_cs *engine = &dev_priv->engine[i];
 if (engine->last_context) {
@@ -553,7 +553,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 intel_ring_emit(engine,
 MI_LOAD_REGISTER_IMM(num_rings));
-for_each_ring(signaller, to_i915(engine->dev), i) {
+for_each_engine(signaller, to_i915(engine->dev), i) {
 if (signaller == engine)
 continue;
@@ -582,7 +582,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 intel_ring_emit(engine,
 MI_LOAD_REGISTER_IMM(num_rings));
-for_each_ring(signaller, to_i915(engine->dev), i) {
+for_each_engine(signaller, to_i915(engine->dev), i) {
 if (signaller == engine)
 continue;
@@ -608,7 +608,7 @@ static inline bool should_skip_switch(struct intel_engine_cs *engine,
 return false;
 if (to->ppgtt && from == to &&
-!(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings))
+!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
 return true;
 return false;
@@ -697,7 +697,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 goto unpin_out;
 /* Doing a PD load always reloads the page dirs */
-to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
+to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 }
 if (engine != &dev_priv->engine[RCS]) {
@@ -725,9 +725,9 @@ static int do_switch(struct drm_i915_gem_request *req)
 * space. This means we must enforce that a page table load
 * occur when this occurs. */
 } else if (to->ppgtt &&
-(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
+(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
 hw_flags |= MI_FORCE_RESTORE;
-to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
+to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 }
 /* We should never emit switch_mm more than once */
...
@@ -43,7 +43,7 @@ i915_verify_lists(struct drm_device *dev)
 if (warned)
 return 0;
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 list_for_each_entry(obj, &engine->active_list,
 ring_list[engine->id]) {
 if (obj->base.dev != dev ||
...
@@ -942,7 +942,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 struct list_head *vmas)
 {
-const unsigned other_rings = ~intel_ring_flag(req->engine);
+const unsigned other_rings = ~intel_engine_flag(req->engine);
 struct i915_vma *vma;
 uint32_t flush_domains = 0;
 bool flush_chipset = false;
@@ -1099,7 +1099,7 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 struct drm_i915_gem_request *req)
 {
-struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
+struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 struct i915_vma *vma;
 list_for_each_entry(vma, vmas, exec_list) {
...
@@ -1739,7 +1739,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 struct intel_engine_cs *engine;
 int j;
-for_each_ring(engine, dev_priv, j) {
+for_each_engine(engine, dev_priv, j) {
 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
 I915_WRITE(RING_MODE_GEN7(engine),
 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
@@ -1765,7 +1765,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 }
 I915_WRITE(GAM_ECOCHK, ecochk);
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 /* GFX_MODE is per-ring on gen7+ */
 I915_WRITE(RING_MODE_GEN7(engine),
 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -2292,7 +2292,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 if (INTEL_INFO(dev)->gen < 6)
 return;
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 u32 fault_reg;
 fault_reg = I915_READ(RING_FAULT_REG(engine));
 if (fault_reg & RING_FAULT_VALID) {
...
@@ -198,7 +198,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 err->size,
 err->read_domains,
 err->write_domain);
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_ENGINES; i++)
 err_printf(m, "%02x ", err->rseqno[i]);
 err_printf(m, "] %02x", err->wseqno);
@@ -732,7 +732,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 err->size = obj->base.size;
 err->name = obj->base.name;
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_ENGINES; i++)
 err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
 err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
 err->gtt_offset = vma->node.start;
@@ -747,7 +747,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 err->purgeable = obj->madv != I915_MADV_WILLNEED;
 err->userptr = obj->userptr.mm != NULL;
 err->ring = obj->last_write_req ?
-i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+i915_gem_request_get_engine(obj->last_write_req)->id : -1;
 err->cache_level = obj->cache_level;
 }
@@ -809,7 +809,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 * synchronization commands which almost always appear in the case
 * strictly a client bug. Use instdone to differentiate those some.
 */
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
 if (ring_id)
 *ring_id = i;
@@ -856,7 +856,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 i915_error_ggtt_object_create(dev_priv,
 dev_priv->semaphore_obj);
-for_each_ring(to, dev_priv, i) {
+for_each_engine(to, dev_priv, i) {
 int idx;
 u16 signal_offset;
 u32 *tmp;
@@ -1019,7 +1019,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 struct drm_i915_gem_request *request;
 int i, count;
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
 struct intel_engine_cs *engine = &dev_priv->engine[i];
 struct intel_ringbuffer *rbuf;
...
@@ -390,7 +390,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 desc.priority = client->priority;
 desc.db_id = client->doorbell_id;
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 struct drm_i915_gem_object *obj;
 uint64_t ctx_desc;
@@ -871,7 +871,7 @@ static void guc_create_ads(struct intel_guc *guc)
 engine = &dev_priv->engine[RCS];
 ads->golden_context_lrca = engine->status_page.gfx_addr;
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 /* GuC scheduling policies */
@@ -884,7 +884,7 @@ static void guc_create_ads(struct intel_guc *guc)
 /* MMIO reg state */
 reg_state = (void *)policies + sizeof(struct guc_policies);
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 reg_state->mmio_white_list[engine->guc_id].mmio_start =
 engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
...
@@ -1082,7 +1082,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
 struct intel_engine_cs *engine;
 int i;
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 if (engine->irq_refcount)
 return true;
@@ -2460,7 +2460,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 */
 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 wake_up_all(&engine->irq_queue);
 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
@@ -2832,7 +2832,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 int i;
 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-for_each_ring(signaller, dev_priv, i) {
+for_each_engine(signaller, dev_priv, i) {
 if (engine == signaller)
 continue;
@@ -2842,7 +2842,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 } else {
 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-for_each_ring(signaller, dev_priv, i) {
+for_each_engine(signaller, dev_priv, i) {
 if(engine == signaller)
 continue;
@@ -2941,7 +2941,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
 return -1;
 /* Prevent pathological recursion due to driver bugs */
-if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
 return -1;
 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
@@ -2960,7 +2960,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 struct intel_engine_cs *engine;
 int i;
-for_each_ring(engine, dev_priv, i)
+for_each_engine(engine, dev_priv, i)
 engine->hangcheck.deadlock = 0;
 }
@@ -3075,7 +3075,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 struct intel_engine_cs *engine;
 int i;
 int busy_count = 0, rings_hung = 0;
-bool stuck[I915_NUM_RINGS] = { 0 };
+bool stuck[I915_NUM_ENGINES] = { 0 };
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
@@ -3097,7 +3097,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 */
 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 u64 acthd;
 u32 seqno;
 bool busy = true;
@@ -3114,7 +3114,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 if (waitqueue_active(&engine->irq_queue)) {
 /* Issue a wake-up to catch stuck h/w. */
 if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
-if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(engine)))
+if (!(dev_priv->gpu_error.test_irq_rings & intel_engine_flag(engine)))
 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
 engine->name);
 else
@@ -3184,7 +3184,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 busy_count += busy;
 }
-for_each_ring(engine, dev_priv, i) {
+for_each_engine(engine, dev_priv, i) {
 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
 DRM_INFO("%s on %s\n",
 stuck[i] ? "stuck" : "no progress",
...
@@ -487,7 +487,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 TP_fast_assign(
 struct intel_engine_cs *engine =
-i915_gem_request_get_ring(req);
+i915_gem_request_get_engine(req);
 __entry->dev = engine->dev->primary->index;
 __entry->ring = engine->id;
 __entry->seqno = i915_gem_request_get_seqno(req);
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 TP_fast_assign(
 struct intel_engine_cs *engine =
-i915_gem_request_get_ring(req);
+i915_gem_request_get_engine(req);
 __entry->dev = engine->dev->primary->index;
 __entry->ring = engine->id;
 __entry->seqno = i915_gem_request_get_seqno(req);
@@ -598,7 +598,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 */
 TP_fast_assign(
 struct intel_engine_cs *engine =
-i915_gem_request_get_ring(req);
+i915_gem_request_get_engine(req);
 __entry->dev = engine->dev->primary->index;
 __entry->ring = engine->id;
 __entry->seqno = i915_gem_request_get_seqno(req);
...
@@ -11242,7 +11242,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 false))
 return true;
 else
-return engine != i915_gem_request_get_ring(obj->last_write_req);
+return engine != i915_gem_request_get_engine(obj->last_write_req);
 }
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11582,7 +11582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 engine = &dev_priv->engine[BCS];
 } else if (INTEL_INFO(dev)->gen >= 7) {
-engine = i915_gem_request_get_ring(obj->last_write_req);
+engine = i915_gem_request_get_engine(obj->last_write_req);
 if (engine == NULL || engine->id != RCS)
 engine = &dev_priv->engine[BCS];
 } else {
...
@@ -87,7 +87,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 	/* route all GT interrupts to the host */
@@ -104,7 +104,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 	/* tell all command streamers to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
 	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
...
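A note on the _MASKED_* helpers in the GuC hunks above: i915 programs ring-mode registers through masked writes, where the high 16 bits of the written value select which of the low 16 bits the hardware is allowed to change. A self-contained sketch of that convention (the macro names mirror the driver's, the emulation helper is purely illustrative):

#include <stdint.h>

#define MASKED_FIELD(mask, value)  (((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(bit)     MASKED_FIELD((bit), (bit))
#define MASKED_BIT_DISABLE(bit)    MASKED_FIELD((bit), 0)

/* Emulate what the hardware does with a masked write: only bits whose
 * mask half is set in 'wr' change; all other bits keep their value. */
static inline uint32_t apply_masked_write(uint32_t reg, uint32_t wr)
{
	uint32_t mask = wr >> 16;

	return (reg & ~mask) | (wr & mask);
}

Because the hardware applies the mask itself, loops like the two above can write RING_MODE_GEN7 on every engine without read-modify-write cycles.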
@@ -669,7 +669,7 @@ static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->engine);
+	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -1057,7 +1057,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	if (!intel_ring_initialized(engine))
 		return;
-	ret = intel_ring_idle(engine);
+	ret = intel_engine_idle(engine);
 	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  engine->name, ret);
@@ -1688,7 +1688,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
 	if (req->ctx->ppgtt &&
-	    (intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
+	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
 		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
 		    !intel_vgpu_active(req->i915->dev)) {
 			ret = intel_logical_ring_emit_pdps(req);
@@ -1696,7 +1696,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 				return ret;
 		}
-		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 	ret = intel_logical_ring_begin(req, 4);
@@ -2511,7 +2511,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 {
 	int i;
-	for (i = I915_NUM_RINGS; --i >= 0; ) {
+	for (i = I915_NUM_ENGINES; --i >= 0; ) {
 		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
@@ -2674,7 +2674,7 @@ void intel_lr_context_reset(struct drm_device *dev,
 	struct intel_engine_cs *engine;
 	int i;
-	for_each_ring(engine, dev_priv, i) {
+	for_each_engine(engine, dev_priv, i) {
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[engine->id].state;
 		struct intel_ringbuffer *ringbuf =
...
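The gen8_emit_bb_start() hunks above capture the idiom the rename must preserve: pd_dirty_rings is a per-engine bitmask, tested against intel_engine_flag() to decide whether page-directory pointers need re-emitting, then cleared once they have been. A self-contained sketch of that dirty-mask protocol (struct and helper names are stand-ins, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define NUM_ENGINES 5

struct ppgtt_state {
	uint32_t pd_dirty;	/* bit n set => engine n must reload PDs */
};

static inline bool needs_pd_reload(const struct ppgtt_state *ppgtt,
				   int engine_id)
{
	return ppgtt->pd_dirty & (1u << engine_id);
}

static inline void mark_pd_loaded(struct ppgtt_state *ppgtt, int engine_id)
{
	ppgtt->pd_dirty &= ~(1u << engine_id);
}

/* Typical submission path: reload page directories at most once per
 * engine after they change, exactly the shape gen8_emit_bb_start() has. */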
@@ -328,7 +328,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 	enum intel_ring_id ring_id;
 	/* Program the control registers */
-	for_each_ring(engine, dev_priv, ring_id) {
+	for_each_engine(engine, dev_priv, ring_id) {
 		ret = emit_mocs_control_table(req, &t, ring_id);
 		if (ret)
 			return ret;
...
@@ -4838,7 +4838,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(engine, dev_priv, unused)
+	for_each_engine(engine, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	if (HAS_GUC_UCODE(dev))
@@ -4906,7 +4906,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(engine, dev_priv, unused)
+	for_each_engine(engine, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	if (IS_BROADWELL(dev))
@@ -5003,7 +5003,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5522,7 +5522,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5633,7 +5633,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -6019,7 +6019,7 @@ bool i915_gpu_busy(void)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		ret |= !list_empty(&engine->request_list);
 out_unlock:
...
@@ -62,7 +62,7 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
 bool intel_ring_stopped(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->dev->dev_private;
-	return dev_priv->gpu_error.stop_rings & intel_ring_flag(engine);
+	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 static void __intel_ring_advance(struct intel_engine_cs *engine)
@@ -1283,7 +1283,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 	if (ret)
 		return ret;
-	for_each_ring(waiter, dev_priv, i) {
+	for_each_engine(waiter, dev_priv, i) {
 		u32 seqno;
 		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
@@ -1324,7 +1324,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 	if (ret)
 		return ret;
-	for_each_ring(waiter, dev_priv, i) {
+	for_each_engine(waiter, dev_priv, i) {
 		u32 seqno;
 		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
@@ -1363,7 +1363,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	if (ret)
 		return ret;
-	for_each_ring(useless, dev_priv, i) {
+	for_each_engine(useless, dev_priv, i) {
 		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (i915_mmio_reg_valid(mbox_reg)) {
@@ -2356,7 +2356,7 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 	intel_ring_update_space(ringbuf);
 }
-int intel_ring_idle(struct intel_engine_cs *engine)
+int intel_engine_idle(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req;
@@ -3170,7 +3170,7 @@ intel_stop_ring_buffer(struct intel_engine_cs *engine)
 	if (!intel_ring_initialized(engine))
 		return;
-	ret = intel_ring_idle(engine);
+	ret = intel_engine_idle(engine);
 	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  engine->name, ret);
...
@@ -55,12 +55,12 @@ struct intel_hw_status_page {
 #define i915_semaphore_seqno_size sizeof(uint64_t)
 #define GEN8_SIGNAL_OFFSET(__ring, to)			     \
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-	 ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+	 ((__ring)->id * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
 	 (i915_semaphore_seqno_size * (to)))
 #define GEN8_WAIT_OFFSET(__ring, from)			     \
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-	 ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+	 ((from) * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
 	 (i915_semaphore_seqno_size * (__ring)->id))
 #define GEN8_RING_SEMAPHORE_INIT(e) do { \
@@ -153,7 +153,7 @@ struct intel_engine_cs {
 		VCS2,	/* Keep instances of the same type engine together. */
 		VECS
 	} id;
-#define I915_NUM_RINGS 5
+#define I915_NUM_ENGINES 5
 #define _VCS(n) (VCS + (n))
 	unsigned int exec_id;
 	unsigned int guc_id;
@@ -244,16 +244,16 @@ struct intel_engine_cs {
 	 * ie. transpose of f(x, y)
 	 */
 	struct {
-		u32	sync_seqno[I915_NUM_RINGS-1];
+		u32	sync_seqno[I915_NUM_ENGINES-1];
 		union {
 			struct {
 				/* our mbox written by others */
-				u32		wait[I915_NUM_RINGS];
+				u32		wait[I915_NUM_ENGINES];
 				/* mboxes this ring signals to */
-				i915_reg_t	signal[I915_NUM_RINGS];
+				i915_reg_t	signal[I915_NUM_ENGINES];
 			} mbox;
-			u64		signal_ggtt[I915_NUM_RINGS];
+			u64		signal_ggtt[I915_NUM_ENGINES];
 		};
 		/* AKA wait() */
@@ -361,7 +361,7 @@ intel_ring_initialized(struct intel_engine_cs *engine)
 }
 static inline unsigned
-intel_ring_flag(struct intel_engine_cs *engine)
+intel_engine_flag(struct intel_engine_cs *engine)
 {
 	return 1 << engine->id;
 }
@@ -382,7 +382,7 @@ intel_ring_sync_index(struct intel_engine_cs *engine,
 	idx = (other - engine) - 1;
 	if (idx < 0)
-		idx += I915_NUM_RINGS;
+		idx += I915_NUM_ENGINES;
 	return idx;
 }
@@ -467,7 +467,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *engine);
-int __must_check intel_ring_idle(struct intel_engine_cs *engine);
+int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
...
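Two bits of arithmetic behind the renamed constant, for the record. With I915_NUM_ENGINES = 5 and i915_semaphore_seqno_size = sizeof(uint64_t) = 8, GEN8_SIGNAL_OFFSET indexes a 5x5 matrix of u64 semaphore slots: signaller id 2 targeting waiter 4 lands at base + 2*5*8 + 4*8 = base + 112, and GEN8_WAIT_OFFSET reads the transposed slot. intel_ring_sync_index() likewise maps another engine to a 0..3 mailbox index by pointer distance within the engine array, wrapping by I915_NUM_ENGINES. A self-contained check of both (plain C, outside the driver; names are local stand-ins):

#include <assert.h>
#include <stdint.h>

#define NUM_ENGINES	5
#define SEQNO_SIZE	sizeof(uint64_t)

/* Mirrors GEN8_SIGNAL_OFFSET: row = signalling engine, column = waiter. */
static uint64_t signal_offset(uint64_t base, int signaller, int waiter)
{
	return base + signaller * NUM_ENGINES * SEQNO_SIZE +
	       SEQNO_SIZE * waiter;
}

/* Mirrors intel_ring_sync_index: distance to 'other', wrapped, minus self. */
static int sync_index(int engine, int other)
{
	int idx = (other - engine) - 1;

	if (idx < 0)
		idx += NUM_ENGINES;
	return idx;
}

int main(void)
{
	assert(signal_offset(0, 2, 4) == 2 * 5 * 8 + 4 * 8);	/* 112 */
	assert(sync_index(0, 4) == 3);	/* last engine, seen from the first */
	assert(sync_index(3, 1) == 2);	/* wraps via NUM_ENGINES */
	return 0;
}

The transposed wait/signal layout is exactly what the "ie. transpose of f(x, y)" comment in the struct above refers to.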
@@ -1573,14 +1573,14 @@ static int gen8_do_reset(struct drm_device *dev)
 	struct intel_engine_cs *engine;
 	int i;
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		if (gen8_request_engine_reset(engine))
 			goto not_ready;
 	return gen6_do_reset(dev);
 not_ready:
-	for_each_ring(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i)
 		gen8_unrequest_engine_reset(engine);
 	return -EIO;
...
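Finally, the iterator being renamed. Its definition is not part of this diff, so the following is only a plausible, self-contained sketch of how such an indexed iterator can be built (hypothetical stand-in types; the kernel's actual for_each_engine() may differ in detail):

#include <stdio.h>

#define I915_NUM_ENGINES 5

struct engine { const char *name; int initialized; };
struct dev_priv { struct engine engine[I915_NUM_ENGINES]; };

/* Walk the fixed engine array by index, skipping slots that were never
 * set up; the empty-then-else shape keeps the macro usable as a plain
 * statement head, like a for loop. */
#define for_each_engine(engine__, dev_priv__, i__)		\
	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++)	\
		if (!((engine__) =				\
		      &(dev_priv__)->engine[(i__)])->initialized) \
			; /* skip uninitialized engines */	\
		else

int main(void)
{
	struct dev_priv dp = { .engine = {
		{ "rcs", 1 }, { "bcs", 1 }, { "vcs", 1 },
		{ "vcs2", 0 }, { "vecs", 1 },
	} };
	struct engine *e;
	int i;

	for_each_engine(e, &dp, i)
		printf("%s\n", e->name);	/* vcs2 is skipped */
	return 0;
}

The filtering matters because not every part populates all five slots; a second video engine, for instance, only exists on some SKUs, which is presumably why the real iterator checks initialization rather than iterating blindly.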