Commit 9f235dfa authored by Tvrtko Ursulin

drm/i915: Consolidate gen8_emit_pipe_control

We have a few open-coded instances in the execlists code and an
almost suitable helper in intel_ringbuffer.c.

We can consolidate to a single helper if we change the existing
helper to emit directly to ring buffer memory and move the space
reservation outside it.

v2: Drop memcpy for memset. (Chris Wilson)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170216122325.31391-2-tvrtko.ursulin@linux.intel.com
Parent 097d4f1c
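The shape of the change is easiest to see in isolation: the helper now only writes six dwords at the pointer it is given and returns the advanced pointer, so reserving the space (intel_ring_begin() in the driver) stays with the caller. Below is a minimal, self-contained sketch of that pattern in userspace C; the GFX_OP_PIPE_CONTROL and PIPE_CONTROL_* values are stand-ins approximating the driver's definitions in i915_reg.h, and the fixed-size array stands in for reserved ring space.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;

/* Stand-ins approximating the i915 definitions (see i915_reg.h). */
#define GFX_OP_PIPE_CONTROL(len)	((0x3 << 29) | (0x3 << 27) | (0x2 << 24) | ((len) - 2))
#define PIPE_CONTROL_CS_STALL		(1 << 20)
#define PIPE_CONTROL_DC_FLUSH_ENABLE	(1 << 5)

/* Same shape as the helper the patch adds to intel_ringbuffer.h:
 * zero six dwords, fill in the first three, return the advanced pointer. */
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));	/* dwords 3..5 stay zero */

	batch[0] = GFX_OP_PIPE_CONTROL(6);	/* 6-dword PIPE_CONTROL header */
	batch[1] = flags;			/* flush/stall control bits */
	batch[2] = offset;			/* post-sync (scratch) address */

	return batch + 6;			/* caller keeps emitting here */
}

int main(void)
{
	u32 ring[12];	/* the "reserved" space; intel_ring_begin() in the driver */
	u32 *cs = ring;

	/* Two back-to-back PIPE_CONTROLs chained through the returned pointer,
	 * as the execlists and ringbuffer callers do after this patch. */
	cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL |
					PIPE_CONTROL_DC_FLUSH_ENABLE, 0);
	cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	printf("emitted %d dwords, header 0x%08x\n",
	       (int)(cs - ring), (unsigned)ring[0]);	/* 12 dwords, 0x7a000004 */
	return 0;
}

This is also why the new gen8_render_ring_flush() below asks intel_ring_begin() for 12 dwords when EMIT_INVALIDATE needs the extra workaround PIPE_CONTROL and 6 otherwise, then chains the helper calls through the same cursor.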
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -918,12 +918,10 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
 	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
 
-	*batch++ = GFX_OP_PIPE_CONTROL(6);
-	*batch++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_DC_FLUSH_ENABLE;
-	*batch++ = 0;
-	*batch++ = 0;
-	*batch++ = 0;
-	*batch++ = 0;
+	batch = gen8_emit_pipe_control(batch,
+				       PIPE_CONTROL_CS_STALL |
+				       PIPE_CONTROL_DC_FLUSH_ENABLE,
+				       0);
 
 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
@@ -957,15 +955,15 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 	if (IS_BROADWELL(engine->i915))
 		batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
-	*batch++ = GFX_OP_PIPE_CONTROL(6);
-	*batch++ = PIPE_CONTROL_FLUSH_L3 | PIPE_CONTROL_GLOBAL_GTT_IVB |
-		   PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE;
 	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
 	/* Actual scratch location is at 128 bytes offset */
-	*batch++ = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
-	*batch++ = 0;
-	*batch++ = 0;
-	*batch++ = 0;
+	batch = gen8_emit_pipe_control(batch,
+				       PIPE_CONTROL_FLUSH_L3 |
+				       PIPE_CONTROL_GLOBAL_GTT_IVB |
+				       PIPE_CONTROL_CS_STALL |
+				       PIPE_CONTROL_QW_WRITE,
+				       i915_ggtt_offset(engine->scratch) +
+				       2 * CACHELINE_BYTES);
 
 	/* Pad to end of cacheline */
 	while ((unsigned long)batch % CACHELINE_BYTES)
@@ -1013,14 +1011,13 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 	/* WaClearSlmSpaceAtContextSwitch:kbl */
 	/* Actual scratch location is at 128 bytes offset */
 	if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
-		*batch++ = GFX_OP_PIPE_CONTROL(6);
-		*batch++ = PIPE_CONTROL_FLUSH_L3 | PIPE_CONTROL_GLOBAL_GTT_IVB |
-			   PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE;
-		*batch++ = i915_ggtt_offset(engine->scratch) +
-			   2 * CACHELINE_BYTES;
-		*batch++ = 0;
-		*batch++ = 0;
-		*batch++ = 0;
+		batch = gen8_emit_pipe_control(batch,
+					       PIPE_CONTROL_FLUSH_L3 |
+					       PIPE_CONTROL_GLOBAL_GTT_IVB |
+					       PIPE_CONTROL_CS_STALL |
+					       PIPE_CONTROL_QW_WRITE,
+					       i915_ggtt_offset(engine->scratch)
+					       + 2 * CACHELINE_BYTES);
 	}
 
 	/* WaMediaPoolStateCmdInWABB:bxt,glk */
@@ -1456,39 +1453,17 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	if (vf_flush_wa) {
-		*cs++ = GFX_OP_PIPE_CONTROL(6);
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-	}
+	if (vf_flush_wa)
+		cs = gen8_emit_pipe_control(cs, 0, 0);
 
-	if (dc_flush_wa) {
-		*cs++ = GFX_OP_PIPE_CONTROL(6);
-		*cs++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-	}
+	if (dc_flush_wa)
+		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+					    0);
 
-	*cs++ = GFX_OP_PIPE_CONTROL(6);
-	*cs++ = flags;
-	*cs++ = scratch_addr;
-	*cs++ = 0;
-	*cs++ = 0;
-	*cs++ = 0;
+	cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
 
-	if (dc_flush_wa) {
-		*cs++ = GFX_OP_PIPE_CONTROL(6);
-		*cs++ = PIPE_CONTROL_CS_STALL;
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-		*cs++ = 0;
-	}
+	if (dc_flush_wa)
+		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
 
 	intel_ring_advance(request, cs);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -334,35 +334,16 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 }
 
 static int
-gen8_emit_pipe_control(struct drm_i915_gem_request *req,
-		       u32 flags, u32 scratch_addr)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
+	u32 flags;
 	u32 *cs;
 
-	cs = intel_ring_begin(req, 6);
+	cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = GFX_OP_PIPE_CONTROL(6);
-	*cs++ = flags;
-	*cs++ = scratch_addr;
-	*cs++ = 0;
-	*cs++ = 0;
-	*cs++ = 0;
-	intel_ring_advance(req, cs);
-
-	return 0;
-}
-
-static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
-{
-	u32 scratch_addr =
-		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
-
-	flags |= PIPE_CONTROL_CS_STALL;
+	flags = PIPE_CONTROL_CS_STALL;
 
 	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
@@ -381,15 +362,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
 		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-		ret = gen8_emit_pipe_control(req,
-					     PIPE_CONTROL_CS_STALL |
-					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
-					     0);
-		if (ret)
-			return ret;
+		cs = gen8_emit_pipe_control(cs,
+					    PIPE_CONTROL_CS_STALL |
+					    PIPE_CONTROL_STALL_AT_SCOREBOARD,
+					    0);
 	}
 
-	return gen8_emit_pipe_control(req, flags, scratch_addr);
+	cs = gen8_emit_pipe_control(cs, flags,
+				    i915_ggtt_offset(req->engine->scratch) +
+				    2 * CACHELINE_BYTES);
+
+	intel_ring_advance(req, cs);
+
+	return 0;
 }
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -631,4 +631,15 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
 
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+	memset(batch, 0, 6 * sizeof(u32));
+
+	batch[0] = GFX_OP_PIPE_CONTROL(6);
+	batch[1] = flags;
+	batch[2] = offset;
+
+	return batch + 6;
+}
+
 #endif /* _INTEL_RINGBUFFER_H_ */