/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

30
#include <linux/log2.h>
31

32 33
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_render_state.h"
37
#include "i915_trace.h"
38
#include "intel_drv.h"
39
#include "intel_workarounds.h"
40

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static unsigned int __intel_ring_space(unsigned int head,
				       unsigned int tail,
				       unsigned int size)
49
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
57 58
}

59
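/*
 * Recompute the free space in the ring from the last known HEAD and the
 * software emit offset.  Note the CACHELINE_BYTES slack subtracted in
 * __intel_ring_space() above: e.g. a 4096 byte ring with head == emit
 * reports 4096 - 64 = 4032 bytes free, so HEAD and TAIL can never end up
 * on the same cacheline.
 */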
unsigned int intel_ring_update_space(struct intel_ring *ring)
60
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
67 68
}

69
static int
70
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
71
{
72
	u32 cmd, *cs;

	cmd = MI_FLUSH;

76
	if (mode & EMIT_INVALIDATE)
77 78
		cmd |= MI_READ_FLUSH;

79
	cs = intel_ring_begin(rq, 2);
80 81
	if (IS_ERR(cs))
		return PTR_ERR(cs);
82

83 84
	*cs++ = cmd;
	*cs++ = MI_NOOP;
85
	intel_ring_advance(rq, cs);

	return 0;
}
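
/*
 * Every emitter in this file follows the same pattern; an illustrative
 * sketch (not an additional emitter):
 *
 *	cs = intel_ring_begin(rq, n);	// reserve n dwords, n must be even
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = ...;			// write exactly n dwords
 *	intel_ring_advance(rq, cs);	// close out the packet
 */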

static int
91
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
92
{
93
	u32 cmd, *cs;
94

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

123
	cmd = MI_FLUSH;
124
	if (mode & EMIT_INVALIDATE) {
125
		cmd |= MI_EXE_FLUSH;
126
		if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
127 128
			cmd |= MI_INVALIDATE_ISP;
	}
129

130
	cs = intel_ring_begin(rq, 2);
131 132
	if (IS_ERR(cs))
		return PTR_ERR(cs);
133

134 135
	*cs++ = cmd;
	*cs++ = MI_NOOP;
136
	intel_ring_advance(rq, cs);
137 138

	return 0;
139 140
}

141
/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
179
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
180
{
181
	u32 scratch_addr =
182
		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
183 184
	u32 *cs;

185
	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
195
	intel_ring_advance(rq, cs);
196

197
	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
207
	intel_ring_advance(rq, cs);

	return 0;
}

static int
213
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
214
{
215
	u32 scratch_addr =
216
		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
217
	u32 *cs, flags = 0;
218 219
	int ret;

220
	/* Force SNB workarounds for PIPE_CONTROL flushes */
221
	ret = intel_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
229
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
236
		flags |= PIPE_CONTROL_CS_STALL;
237
	}
238
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
248
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
249
	}
250

251
	cs = intel_ring_begin(rq, 4);
252 253
	if (IS_ERR(cs))
		return PTR_ERR(cs);
254

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
259
	intel_ring_advance(rq, cs);

	return 0;
}

264
static int
265
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
266
{
267
	u32 *cs;
268

269
	cs = intel_ring_begin(rq, 4);
270 271
	if (IS_ERR(cs))
		return PTR_ERR(cs);
272

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
277
	intel_ring_advance(rq, cs);

	return 0;
}

282
static int
283
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
284
{
285
	u32 scratch_addr =
286
		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
287
	u32 *cs, flags = 0;
288

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
303
	if (mode & EMIT_FLUSH) {
304 305
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
306
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
307
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
308
	}
309
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
316
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
321
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
322

323 324
		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
328
		gen7_render_ring_cs_stall_wa(rq);
329 330
	}

331
	cs = intel_ring_begin(rq, 4);
332 333
	if (IS_ERR(cs))
		return PTR_ERR(cs);
334

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
339
	intel_ring_advance(rq, cs);

	return 0;
}

344
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
345
{
346
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
350
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

355
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
356
{
357
	struct drm_i915_private *dev_priv = engine->i915;
358
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
363
	if (IS_GEN7(dev_priv)) {
364
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
384
	} else if (IS_GEN6(dev_priv)) {
385
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
386
	} else {
387
		mmio = RING_HWS_PGA(engine->mmio_base);
388 389
	}

	if (INTEL_GEN(dev_priv) >= 6)
		I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);

393
	I915_WRITE(mmio, engine->status_page.ggtt_offset);
394 395
	POSTING_READ(mmio);

396
	/* Flush the TLB for this page */
397
	if (IS_GEN(dev_priv, 6, 7)) {
398
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
399 400

	/* ring should be idle before issuing a sync flush */
401
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
409
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
410
				  engine->name);
	}
}

414
static bool stop_ring(struct intel_engine_cs *engine)
415
{
416
	struct drm_i915_private *dev_priv = engine->i915;
417

418
	if (INTEL_GEN(dev_priv) > 2) {
419
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
425 426
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
431
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
432
				return false;
433 434
		}
	}
435

436 437
	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));

438
	I915_WRITE_HEAD(engine, 0);
439
	I915_WRITE_TAIL(engine, 0);
440

	/* The ring must be empty before it is disabled */
	I915_WRITE_CTL(engine, 0);

444
	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
445
}
446

447
static int init_ring_common(struct intel_engine_cs *engine)
448
{
449
	struct drm_i915_private *dev_priv = engine->i915;
450
	struct intel_ring *ring = engine->buffer;
451 452
	int ret = 0;

453
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
454

455
	if (!stop_ring(engine)) {
456
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				I915_READ_CTL(engine),
				I915_READ_HEAD(engine),
				I915_READ_TAIL(engine),
				I915_READ_START(engine));
464

465
		if (!stop_ring(engine)) {
466 467
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
473 474
			ret = -EIO;
			goto out;
475
		}
476 477
	}

478
	if (HWS_NEEDS_PHYSICAL(dev_priv))
479
		ring_setup_phys_status_page(engine);
480 481
	else
		intel_ring_setup_status_page(engine);
482

483
	intel_engine_reset_breadcrumbs(engine);
484

485
	/* Enforce ordering by reading HEAD register back */
486
	I915_READ_HEAD(engine);
487

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
492
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
493 494

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
495
	if (I915_READ_HEAD(engine))
496 497
		DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
				 engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);
503

504
	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
505 506

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
510
		DRM_ERROR("%s initialization failed "
511
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
515 516
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
517
			  I915_READ_START(engine),
518
			  i915_ggtt_offset(ring->vma));
519 520
		ret = -EIO;
		goto out;
521 522
	}

523
	intel_engine_init_hangcheck(engine);
524

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

528
out:
529
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
530 531

	return ret;
532 533
}

534
static void reset_ring_common(struct intel_engine_cs *engine,
535
			      struct i915_request *request)
536
{
	/*
	 * RC6 must be prevented until the reset is complete and the engine
	 * reinitialised. If it occurs in the middle of this sequence, the
	 * state written to/loaded from the power context is ill-defined (e.g.
	 * the PP_BASE_DIR may be lost).
	 */
	assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);

	/*
	 * Try to restore the logical GPU state to match the continuation
	 * of the request queue. If we skip the context/PD restore, then
	 * the next request may try to execute assuming that its context
	 * is valid and loaded on the GPU and so may try to access invalid
	 * memory, prompting repeated GPU hangs.
	 *
	 * If the request was guilty, we still restore the logical state
	 * in case the next request requires it (e.g. the aliasing ppgtt),
	 * but skip over the hung batch.
	 *
	 * If the request was innocent, we try to replay the request with
	 * the restored context.
	 */
	if (request) {
		struct drm_i915_private *dev_priv = request->i915;
561 562
		struct intel_context *ce = to_intel_context(request->ctx,
							    engine);
		struct i915_hw_ppgtt *ppgtt;

		if (ce->state) {
			I915_WRITE(CCID,
				   i915_ggtt_offset(ce->state) |
				   BIT(8) /* must be set! */ |
				   CCID_EXTENDED_STATE_SAVE |
				   CCID_EXTENDED_STATE_RESTORE |
				   CCID_EN);
		}

		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
		if (ppgtt) {
			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;

			I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
			I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);

			/* Wait for the PD reload to complete */
			if (intel_wait_for_register(dev_priv,
						    RING_PP_DIR_BASE(engine),
						    BIT(0), 0,
						    10))
				DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
587

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		/* If the rq hung, jump to its breadcrumb and skip the batch */
592 593
		if (request->fence.error == -EIO)
			request->ring->head = request->postfix;
594 595
	} else {
		engine->legacy_active_context = NULL;
596
		engine->legacy_active_ppgtt = NULL;
597
	}
598 599
}

600
static int intel_rcs_ctx_init(struct i915_request *rq)
{
	int ret;

604
	ret = intel_ctx_workarounds_emit(rq);
	if (ret != 0)
		return ret;

608
	ret = i915_gem_render_state_emit(rq);
609
	if (ret)
610
		return ret;
611

612
	return 0;
613 614
}

615
static int init_render_ring(struct intel_engine_cs *engine)
616
{
617
	struct drm_i915_private *dev_priv = engine->i915;
618
	int ret = init_ring_common(engine);
619 620
	if (ret)
		return ret;
621

622
	intel_whitelist_workarounds_apply(engine);
623

624
	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
625
	if (IS_GEN(dev_priv, 4, 6))
626
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
631
	 *
632
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
633
	 */
634
	if (IS_GEN(dev_priv, 6, 7))
635 636
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

637
	/* Required for the hardware to program scanline values for waiting */
638
	/* WaEnableFlushTlbInvalidationMode:snb */
639
	if (IS_GEN6(dev_priv))
640
		I915_WRITE(GFX_MODE,
641
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
642

643
	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
644
	if (IS_GEN7(dev_priv))
645
		I915_WRITE(GFX_MODE_GEN7,
646
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
647
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
648

649
	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
656
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
657 658
	}

659
	if (IS_GEN(dev_priv, 6, 7))
660
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
661

662
	if (INTEL_GEN(dev_priv) >= 6)
663
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
664

665
	return 0;
666 667
}

668
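/*
 * Broadcast this request's breadcrumb: write the global seqno into the
 * semaphore mailbox register of every other engine taking part in legacy
 * semaphore signalling, one MI_LOAD_REGISTER_IMM per engine, padding the
 * packet to an even number of dwords with MI_NOOP.
 */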
static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
669
{
670
	struct drm_i915_private *dev_priv = rq->i915;
671
	struct intel_engine_cs *engine;
672
	enum intel_engine_id id;
	int num_rings = 0;
674

675
	for_each_engine(engine, dev_priv, id) {
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;
680

681
		mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
682
		if (i915_mmio_reg_valid(mbox_reg)) {
683 684
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(mbox_reg);
685
			*cs++ = rq->global_seqno;
			num_rings++;
687 688
		}
	}
	if (num_rings & 1)
690
		*cs++ = MI_NOOP;
691

692
	return cs;
693 694
}

695 696
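/*
 * Typically called when the device is declared wedged: flag every request
 * already submitted on this engine as failed (-EIO) so that waiters are
 * released; requests not yet submitted are nop'ed later at submission.
 */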
static void cancel_requests(struct intel_engine_cs *engine)
{
697
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->timeline->requests, link) {
		GEM_BUG_ON(!request->global_seqno);
705
		if (!i915_request_completed(request))
			dma_fence_set_error(&request->fence, -EIO);
	}
	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

713
static void i9xx_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

717
	i915_request_submit(request);
718

719 720
	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));
721 722
}

723
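/*
 * Legacy breadcrumb: store the request's global seqno into the hardware
 * status page (slot I915_GEM_HWS_INDEX) and raise MI_USER_INTERRUPT to
 * wake any waiters.  i9xx_emit_breadcrumb_sz below must match the number
 * of dwords written here (4).
 */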
static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
724
{
725 726
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
727
	*cs++ = rq->global_seqno;
728
	*cs++ = MI_USER_INTERRUPT;
729

730 731
	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
732 733
}

734 735
static const int i9xx_emit_breadcrumb_sz = 4;

736
static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
737
{
738
	return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
739 740
}

741
static int
742
gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
743
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
747
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
748
	u32 *cs;
749

750
	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
751

752
	cs = intel_ring_begin(rq, 4);
753 754
	if (IS_ERR(cs))
		return PTR_ERR(cs);
755

756
	*cs++ = dw1 | wait_mbox;
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	*cs++ = signal->global_seqno - 1;
	*cs++ = 0;
	*cs++ = MI_NOOP;
764
	intel_ring_advance(rq, cs);

	return 0;
}

769
static void
770
gen5_seqno_barrier(struct intel_engine_cs *engine)
771
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
775
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
783
	 */
784
	usleep_range(125, 250);
785 786
}

787 788
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
789
{
790
	struct drm_i915_private *dev_priv = engine->i915;
791

792 793
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
806
	 */
807
	spin_lock_irq(&dev_priv->uncore.lock);
808
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
809
	spin_unlock_irq(&dev_priv->uncore.lock);
810 811
}

812 813
static void
gen5_irq_enable(struct intel_engine_cs *engine)
814
{
815
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
819
gen5_irq_disable(struct intel_engine_cs *engine)
820
{
821
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
822 823
}

824 825
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
826
{
827
	struct drm_i915_private *dev_priv = engine->i915;
828

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
832 833
}

834
static void
835
i9xx_irq_disable(struct intel_engine_cs *engine)
836
{
837
	struct drm_i915_private *dev_priv = engine->i915;
838

839 840
	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
841 842
}

843 844
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
846
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
854
i8xx_irq_disable(struct intel_engine_cs *engine)
{
856
	struct drm_i915_private *dev_priv = engine->i915;

858 859
	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

862
static int
863
bsd_ring_flush(struct i915_request *rq, u32 mode)
864
{
865
	u32 *cs;
866

867
	cs = intel_ring_begin(rq, 2);
868 869
	if (IS_ERR(cs))
		return PTR_ERR(cs);
870

871 872
	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
873
	intel_ring_advance(rq, cs);
874
	return 0;
875 876
}

877 878
static void
gen6_irq_enable(struct intel_engine_cs *engine)
879
{
880
	struct drm_i915_private *dev_priv = engine->i915;
881

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
885
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
889
gen6_irq_disable(struct intel_engine_cs *engine)
890
{
891
	struct drm_i915_private *dev_priv = engine->i915;
892

893
	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
894
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
895 896
}

897 898
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
900
	struct drm_i915_private *dev_priv = engine->i915;

902
	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
903
	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
907
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
909
	struct drm_i915_private *dev_priv = engine->i915;

911
	I915_WRITE_IMR(engine, ~0);
912
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}

915
static int
916
i965_emit_bb_start(struct i915_request *rq,
917 918
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
919
{
920
	u32 *cs;
921

922
	cs = intel_ring_begin(rq, 2);
923 924
	if (IS_ERR(cs))
		return PTR_ERR(cs);
925

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
929
	intel_ring_advance(rq, cs);
930

	return 0;
}

934 935
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
936 937
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
938
static int
939
i830_emit_bb_start(struct i915_request *rq,
940 941
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
942
{
943
	u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
944

945
	cs = intel_ring_begin(rq, 6);
946 947
	if (IS_ERR(cs))
		return PTR_ERR(cs);
948

949
	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
956
	intel_ring_advance(rq, cs);
957

958
	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

962
		cs = intel_ring_begin(rq, 6 + 2);
963 964
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which now has all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
979
		intel_ring_advance(rq, cs);
980 981

		/* ... and execute it. */
982
		offset = cs_offset;
983
	}
984

985
	cs = intel_ring_begin(rq, 2);
986 987
	if (IS_ERR(cs))
		return PTR_ERR(cs);
988

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
992
	intel_ring_advance(rq, cs);
993

	return 0;
}

static int
998
i915_emit_bb_start(struct i915_request *rq,
999 1000
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
1001
{
1002
	u32 *cs;
1003

1004
	cs = intel_ring_begin(rq, 2);
1005 1006
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1007

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
1011
	intel_ring_advance(rq, cs);

	return 0;
}


1017
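/*
 * Pin the ringbuffer into the GGTT and map it for CPU writes: through the
 * mappable aperture when the vma is map-and-fenceable, otherwise directly
 * via the backing pages (write-back on LLC platforms, write-combined
 * elsewhere).
 */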

int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias)
1021
{
1022
	enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1023
	struct i915_vma *vma = ring->vma;
1024
	unsigned int flags;
1025
	void *addr;
1026 1027
	int ret;

1028
	GEM_BUG_ON(ring->vaddr);
1029

1030

	flags = PIN_GLOBAL;
	if (offset_bias)
		flags |= PIN_OFFSET_BIAS | offset_bias;
1034
	if (vma->obj->stolen)
1035
		flags |= PIN_MAPPABLE;
1036

1037
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1038
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
1043
			return ret;
1044
	}
1045

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;
1049

1050
	if (i915_vma_is_map_and_fenceable(vma))
1051 1052
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
1053
		addr = i915_gem_object_pin_map(vma->obj, map);
1054 1055
	if (IS_ERR(addr))
		goto err;
1056

1057 1058
	vma->obj->pin_global++;

1059
	ring->vaddr = addr;
1060
	return 0;
1061

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
1065 1066
}

void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	GEM_BUG_ON(!list_empty(&ring->request_list));
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->tail);

1084
	if (i915_vma_is_map_and_fenceable(ring->vma))
1085
		i915_vma_unpin_iomap(ring->vma);
1086 1087
	else
		i915_gem_object_unpin_map(ring->vma->obj);
1088 1089
	ring->vaddr = NULL;

1090
	ring->vma->obj->pin_global--;
1091
	i915_vma_unpin(ring->vma);
1092 1093
}

1094 1095
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1096
{
1097
	struct drm_i915_gem_object *obj;
1098
	struct i915_vma *vma;
1099

1100
	obj = i915_gem_object_create_stolen(dev_priv, size);
1101
	if (!obj)
1102
		obj = i915_gem_object_create_internal(dev_priv, size);
1103 1104
	if (IS_ERR(obj))
		return ERR_CAST(obj);
1105

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

1109
	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;
1114

err:
	i915_gem_object_put(obj);
	return vma;
1118 1119
}

1120 1121
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1122
{
1123
	struct intel_ring *ring;
1124
	struct i915_vma *vma;
1125

1126
	GEM_BUG_ON(!is_power_of_2(size));
1127
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1128

1129
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1130
	if (!ring)
1131 1132
		return ERR_PTR(-ENOMEM);

1133 1134
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
1141
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

1146 1147
	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
1148
		kfree(ring);
1149
		return ERR_CAST(vma);
1150
	}
1151
	ring->vma = vma;

	return ring;
}

void
1157
intel_ring_free(struct intel_ring *ring)
1158
{
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

	kfree(ring);
}

1167
static int context_pin(struct intel_context *ce)
1168
{
1169
	struct i915_vma *vma = ce->state;
1170 1171
	int ret;

1172 1173
	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1178
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		if (ret)
			return ret;
	}

1183 1184
	return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
			    PIN_GLOBAL | PIN_HIGH);
1185 1186
}
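
/*
 * Allocate the backing object for a legacy hardware context image.  When
 * the engine has a default state object, its contents are copied in so
 * that the first context restore starts from a known register state, and
 * on Ivybridge the image is moved to L3+LLC as an optimisation.
 */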

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
1193
	int err;
1194

1195
	obj = i915_gem_object_create(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (engine->default_state) {
		void *defaults, *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			err = PTR_ERR(defaults);
			goto err_map;
		}

		memcpy(vaddr, defaults, engine->context_size);

		i915_gem_object_unpin_map(engine->default_state);
		i915_gem_object_unpin_map(obj);
	}

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915)) {
		/* Ignore any error, regard it as a simple optimisation */
		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}
1246 1247

	return vma;

err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
1254 1255
}

static struct intel_ring *
intel_ring_context_pin(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
1259
{
1260
	struct intel_context *ce = to_intel_context(ctx, engine);
1261 1262
	int ret;

1263
	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1264

1265 1266
	if (likely(ce->pin_count++))
		goto out;
1267
	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1268

1269
	if (!ce->state && engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
1275
			goto err;
		}

		ce->state = vma;
	}

1281
	if (ce->state) {
1282
		ret = context_pin(ce);
1283
		if (ret)
1284
			goto err;
1285

1286
		ce->state->obj->pin_global++;
1287 1288
	}

1289
	i915_gem_context_get(ctx);
1290

out:
	/* One ringbuffer to rule them all */
	return engine->buffer;

err:
1296
	ce->pin_count = 0;
1297
	return ERR_PTR(ret);
1298 1299
}

1300 1301
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
				     struct i915_gem_context *ctx)
1302
{
1303
	struct intel_context *ce = to_intel_context(ctx, engine);
1304

1305
	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1306
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

1311 1312
	if (ce->state) {
		ce->state->obj->pin_global--;
1313
		i915_vma_unpin(ce->state);
1314
	}
1315

1316
	i915_gem_context_put(ctx);
1317 1318
}

1319
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1320
{
1321
	struct intel_ring *ring;
1322
	int err;
1323

1324 1325
	intel_engine_setup_common(engine);

	err = intel_engine_init_common(engine);
	if (err)
		goto err;
1329

1330 1331
	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
1332
		err = PTR_ERR(ring);
1333
		goto err;
1334 1335
	}

1336
	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
1342
	engine->buffer = ring;
1343

1344
	return 0;
1345

err_ring:
	intel_ring_free(ring);
err:
	intel_engine_cleanup_common(engine);
	return err;
1351 1352
}

1353
void intel_engine_cleanup(struct intel_engine_cs *engine)
1354
{
1355
	struct drm_i915_private *dev_priv = engine->i915;
1356

1357 1358
	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);
1359

1360 1361
	intel_ring_unpin(engine->buffer);
	intel_ring_free(engine->buffer);
1362

1363 1364
	if (engine->cleanup)
		engine->cleanup(engine);

1366
	intel_engine_cleanup_common(engine);
1367

1368 1369
	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
1370 1371
}

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
1375
	enum intel_engine_id id;
1376

1377
	/* Restart from the beginning of the rings for convenience */
1378
	for_each_engine(engine, dev_priv, id)
1379
		intel_ring_reset(engine->buffer, 0);
1380 1381
}

1382
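/*
 * Emit MI_SET_CONTEXT to switch the legacy (ring buffer mode) hardware
 * context.  On gen7 the switch is bracketed by MI_ARB_ON_OFF and, when
 * other engines may be signalling semaphores, by per-engine PSMI sleep
 * message writes (WaProgramMiArbOnOffAroundMiSetContext and
 * WaMiSetContext_Hang below).
 */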
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_rings =
		/* Use an extended w/a on gen7 if signalling from other rings */
		(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
		INTEL_INFO(i915)->num_rings - 1 :
		0;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(i915))
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (IS_GEN7(i915))
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN7(i915)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
1431
	*cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN7(i915)) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_ggtt_offset(engine->scratch);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

1468
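/*
 * Re-emit the saved L3 remapping registers for one slice as a single
 * MI_LOAD_REGISTER_IMM packet (presumably restoring the user-requested
 * L3 error remapping after a context switch).  Nothing is emitted if no
 * remap information has been set for this slice.
 */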
static int remap_l3(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

1496
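/*
 * Switch to the new request's address space and hardware context,
 * roughly in three steps:
 *
 *	1. if the ppgtt changed (or its page directories are dirty for
 *	   this engine), reload it via switch_mm() and force a restore;
 *	2. if the context image changed, emit MI_SET_CONTEXT (restore is
 *	   inhibited for the kernel context, which is treated as scratch);
 *	3. re-emit any pending L3 slice remapping.
 *
 * On error the previous active context/ppgtt pointers are put back so
 * that a later attempt re-emits the whole switch.
 */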
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *to_ctx = rq->ctx;
	struct i915_hw_ppgtt *to_mm =
		to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
	struct i915_gem_context *from_ctx = engine->legacy_active_context;
	struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
	u32 hw_flags = 0;
	int ret, i;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (to_mm != from_mm ||
	    (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
		trace_switch_mm(engine, to_ctx);
		ret = to_mm->switch_mm(to_mm, rq);
		if (ret)
			goto err;

		to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
		engine->legacy_active_ppgtt = to_mm;
		hw_flags = MI_FORCE_RESTORE;
	}

1522
	if (to_intel_context(to_ctx, engine)->state &&
	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
		GEM_BUG_ON(engine->id != RCS);

		/*
		 * The kernel context(s) is treated as pure scratch and is not
		 * expected to retain any state (as we sacrifice it during
		 * suspend and on resume it may be corrupted). This is ok,
		 * as nothing actually executes using the kernel context; it
		 * is purely used for flushing user contexts.
		 */
		if (i915_gem_context_is_kernel(to_ctx))
			hw_flags = MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, hw_flags);
		if (ret)
			goto err_mm;

		engine->legacy_active_context = to_ctx;
	}

	if (to_ctx->remap_slice) {
		for (i = 0; i < MAX_L3_SLICES; i++) {
			if (!(to_ctx->remap_slice & BIT(i)))
				continue;

			ret = remap_l3(rq, i);
			if (ret)
				goto err_ctx;
		}

		to_ctx->remap_slice = 0;
	}

	return 0;

err_ctx:
	engine->legacy_active_context = from_ctx;
err_mm:
	engine->legacy_active_ppgtt = from_mm;
err:
	return ret;
}

1566
static int ring_request_alloc(struct i915_request *request)
1567
{
1568
	int ret;
1569

1570
	GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
1571

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
1576
	request->reserved_space += LEGACY_REQUEST_SIZE;
1577

	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
	if (ret)
		return ret;
1581

1582
	ret = switch_context(request);
	if (ret)
		return ret;

1586
	request->reserved_space -= LEGACY_REQUEST_SIZE;
1587
	return 0;
1588 1589
}

1590
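/*
 * The ring is full: find the oldest request still occupying the ring
 * whose retirement would free enough space, wait for it to complete and
 * retire everything up to it.  May sleep; the caller must hold
 * struct_mutex (asserted below).
 */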
static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
1591
{
1592
	struct i915_request *target;
1593 1594
	long timeout;

1595
	lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
1596

1597
	if (intel_ring_update_space(ring) >= bytes)
1598 1599
		return 0;

1600
	GEM_BUG_ON(list_empty(&ring->request_list));
1601
	list_for_each_entry(target, &ring->request_list, ring_link) {
1602
		/* Would completion of this request free enough space? */
1603 1604
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
1605
			break;
1606
	}
1607

1608
	if (WARN_ON(&target->ring_link == &ring->request_list))
1609 1610
		return -ENOSPC;

1611
	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;
1616

1617
	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
1622 1623
}
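
/*
 * Ensure at least @bytes of ring space is available, accounting for a
 * tail wrap: if the packet does not fit before the end of the buffer,
 * the bytes skipped at the end must be freed as well.  E.g. (assuming
 * effective_size == size) a 16 KiB ring with emit at 15 KiB needs 3 KiB
 * of space for a 2 KiB packet: 2 KiB plus the 1 KiB written off at the
 * end.
 */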

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	GEM_BUG_ON(bytes > ring->effective_size);
	if (unlikely(bytes > ring->effective_size - ring->emit))
		bytes += ring->size - ring->emit;

	if (unlikely(bytes > ring->space)) {
		int ret = wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

1640
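/*
 * Reserve @num_dwords (which must be even) in the request's ring and
 * return a pointer for the caller to fill in.  Handles wrapping the tail
 * (padding the remainder of the ring with MI_NOOP) and, if necessary,
 * waiting for old requests to retire to make room; returns an ERR_PTR on
 * failure.
 */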
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
1642
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
1647
	u32 *cs;
1648

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

1652
	total_bytes = bytes + rq->reserved_space;
1653
	GEM_BUG_ON(total_bytes > ring->effective_size);
1654

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else  {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
1673
			total_bytes = rq->reserved_space + remain_actual;
1674
		}
	}

1677
	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
1687
		 * See also i915_request_alloc() and i915_request_add().
1688
		 */
1689
		GEM_BUG_ON(!rq->reserved_space);
1690 1691

		ret = wait_for_space(ring, total_bytes);
		if (unlikely(ret))
1693
			return ERR_PTR(ret);
	}

1696
	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1700
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
1701

1702
		/* Fill the tail with MI_NOOP */
1703
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
1704
		ring->space -= need_wrap;
1705
		ring->emit = 0;
1706
	}
1707

1708
	GEM_BUG_ON(ring->emit > ring->size - bytes);
1709
	GEM_BUG_ON(ring->space < bytes);
1710
	cs = ring->vaddr + ring->emit;
1711
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
1712
	ring->emit += bytes;
1713
	ring->space -= bytes;
1714 1715

	return cs;
1716
}
1717

1718
/* Align the ring tail to a cacheline boundary */
1719
int intel_ring_cacheline_align(struct i915_request *rq)
1720
{
1721 1722
	int num_dwords;
	void *cs;
1723

1724
	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

1731
	cs = intel_ring_begin(rq, num_dwords);
1732 1733
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1734

1735
	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
1736
	intel_ring_advance(rq, cs);
1737

1738
	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

1742
static void gen6_bsd_submit_request(struct i915_request *request)
1743
{
1744
	struct drm_i915_private *dev_priv = request->i915;
1745

1746 1747
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

1748
	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
1753 1754
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1755 1756

	/* Clear the context id. Here be magic! */
1757
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
1758

1759
	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
1765
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1766

1767
	/* Now that the ring is fully powered up, update the tail */
1768
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1777 1778
}

1779
static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
1780
{
1781
	u32 cmd, *cs;
1782

1783
	cs = intel_ring_begin(rq, 4);
1784 1785
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1786

1787
	cmd = MI_FLUSH_DW;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
1802
	if (mode & EMIT_INVALIDATE)
1803 1804
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

1805 1806
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1807
	*cs++ = 0;
1808
	*cs++ = MI_NOOP;
1809
	intel_ring_advance(rq, cs);
	return 0;
}

1813
static int
1814
hsw_emit_bb_start(struct i915_request *rq,
1815 1816
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
1817
{
1818
	u32 *cs;
1819

1820
	cs = intel_ring_begin(rq, 2);
1821 1822
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1823

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
		(dispatch_flags & I915_DISPATCH_RS ?
		MI_BATCH_RESOURCE_STREAMER : 0);
1828
	/* bit0-7 is the length on GEN6+ */
1829
	*cs++ = offset;
1830
	intel_ring_advance(rq, cs);

	return 0;
}

1835
static int
1836
gen6_emit_bb_start(struct i915_request *rq,
1837 1838
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
1839
{
1840
	u32 *cs;
1841

1842
	cs = intel_ring_begin(rq, 2);
1843 1844
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1845

1846 1847
	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
1848
	/* bit0-7 is the length on GEN6+ */
1849
	*cs++ = offset;
1850
	intel_ring_advance(rq, cs);
1851

1852
	return 0;
1853 1854
}

1855 1856
/* Blitter support (SandyBridge+) */

1857
static int gen6_ring_flush(struct i915_request *rq, u32 mode)
Z
1859
	u32 cmd, *cs;
1860

1861
	cs = intel_ring_begin(rq, 4);
1862 1863
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1864

1865
	cmd = MI_FLUSH_DW;
1866 1867 1868 1869 1870 1871 1872 1873

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

1874 1875 1876 1877 1878 1879
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
1880
	if (mode & EMIT_INVALIDATE)
1881
		cmd |= MI_INVALIDATE_TLB;
1882 1883
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1884 1885
	*cs++ = 0;
	*cs++ = MI_NOOP;
1886
	intel_ring_advance(rq, cs);
R
Rodrigo Vivi 已提交
1887

1888
	return 0;
Z
Zou Nan hai 已提交
1889 1890
}

static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	int i;

	if (!HAS_LEGACY_SEMAPHORES(dev_priv))
		return;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
	engine->semaphore.sync_to = gen6_ring_sync_to;
	engine->semaphore.signal = gen6_signal;

	/*
	 * The current semaphore is only applied on pre-gen8
	 * platform.  And there is no VCS2 ring on the pre-gen8
	 * platform. So the semaphore between RCS and VCS2 is
	 * initialized as INVALID.
	 */
	for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
		static const struct {
			u32 wait_mbox;
			i915_reg_t mbox_reg;
		} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
			[RCS_HW] = {
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
			},
			[VCS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
			},
			[BCS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
			},
			[VECS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
			},
		};
		u32 wait_mbox;
		i915_reg_t mbox_reg;

		if (i == engine->hw_id) {
			wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
			mbox_reg = GEN6_NOSYNC;
		} else {
			wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
			mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
		}

		engine->semaphore.mbox.wait[i] = wait_mbox;
		engine->semaphore.mbox.signal[i] = mbox_reg;
	}
}

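/*
 * Pick the user-interrupt enable/disable hooks for the engine based on
 * hardware generation; the gen5 and gen6+ paths also install a seqno
 * barrier, as the breadcrumb write may lag the interrupt on those
 * parts.
 */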
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

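/*
 * The legacy backend submits by writing the ring tail directly from
 * the request; it has no parking hooks, so park/unpark stay NULL.
 */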
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;

	engine->park = NULL;
	engine->unpark = NULL;
}

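/*
 * On gen6 the BSD ring must not have its tail written while it may be
 * in rc6, so submission is routed through gen6_bsd_submit_request(),
 * which wraps the tail update in the PSMI wake/sleep sequence above.
 */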
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

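/*
 * Common vfunc setup for all legacy (ringbuffer) engines; the
 * per-engine init functions below override emit_flush, the interrupt
 * masks and, for render, the batchbuffer start and hardware init
 * hooks.
 */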
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);

	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->context_pin = intel_ring_context_pin;
	engine->context_unpin = intel_ring_context_unpin;

	engine->request_alloc = ring_request_alloc;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
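		/*
		 * Each ring signalled by gen6_sema_emit_breadcrumb() is
		 * presumed to cost three extra dwords (a register write of
		 * the seqno into its mailbox), plus one MI_NOOP of padding
		 * when the count is odd to keep the total qword aligned.
		 */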
		engine->emit_breadcrumb_sz += num_rings * 3;
		if (num_rings & 1)
			engine->emit_breadcrumb_sz++;
	}

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

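/*
 * Render engine setup on top of the defaults: gen-specific flush
 * emitters, the L3 parity interrupt on parts with L3 DPF, and a
 * scratch buffer (presumably used for PIPE_CONTROL post-sync writes on
 * gen6+, or as the I830 workaround batch on broken-CS-TLB parts).
 */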
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, PAGE_SIZE);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	return intel_init_ring_buffer(engine);
}