intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

42
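/* Free space in the circular ring between head and tail, less
 * I915_RING_FREE_SPACE bytes of headroom kept back so that head and tail
 * never collide (head == tail must always mean "empty").
 */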
static int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ring *ring)
{
	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cmd = MI_FLUSH;

	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
77
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
78
{
79
	u32 cmd, *cs;
80

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

109
	cmd = MI_FLUSH;
110
	if (mode & EMIT_INVALIDATE) {
111
		cmd |= MI_EXE_FLUSH;
112 113 114
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}
115

116 117 118
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
119

120 121 122
	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
123 124

	return 0;
125 126
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
165
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
166
{
167
	u32 scratch_addr =
168
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
199
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
200
{
201
	u32 scratch_addr =
202
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
203
	u32 *cs, flags = 0;
204 205
	int ret;

206
	/* Force SNB workarounds for PIPE_CONTROL flushes */
207
	ret = intel_emit_post_sync_nonzero_flush(req);
208 209 210
	if (ret)
		return ret;

211 212 213 214
	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
215
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
222
		flags |= PIPE_CONTROL_CS_STALL;
223
	}
224
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
234
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
235
	}
236

237 238 239
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
240

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(req, cs);
246 247 248 249

	return 0;
}

250
static int
251
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
252
{
253
	u32 *cs;
254

255 256 257
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
258

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(req, cs);
264 265 266 267

	return 0;
}

268
static int
269
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
270
{
271
	u32 scratch_addr =
272
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
273
	u32 *cs, flags = 0;
274

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

285 286 287 288
	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
289
	if (mode & EMIT_FLUSH) {
290 291
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
292
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
293
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
294
	}
295
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
302
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
303 304 305 306
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
307
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
308

309 310
		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

311 312 313
		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
314
		gen7_render_ring_cs_stall_wa(req);
315 316
	}

317 318 319
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
320

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(req, cs);
326 327 328 329

	return 0;
}

330
static int
331
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
332
{
333
	u32 flags;
334
	u32 *cs;
335

336
	cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
337 338
	if (IS_ERR(cs))
		return PTR_ERR(cs);
339

340
	flags = PIPE_CONTROL_CS_STALL;

342
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
345
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
346
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
348
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
357 358

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
359 360 361 362
		cs = gen8_emit_pipe_control(cs,
					    PIPE_CONTROL_CS_STALL |
					    PIPE_CONTROL_STALL_AT_SCOREBOARD,
					    0);
	}

	cs = gen8_emit_pipe_control(cs, flags,
				    i915_ggtt_offset(req->engine->scratch) +
				    2 * CACHELINE_BYTES);

	intel_ring_advance(req, cs);

	return 0;
}

374
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
375
{
376
	struct drm_i915_private *dev_priv = engine->i915;
377 378 379
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
380
	if (INTEL_GEN(dev_priv) >= 4)
381 382 383 384
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

385
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
386
{
387
	struct drm_i915_private *dev_priv = engine->i915;
388
	i915_reg_t mmio;
389 390 391 392

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
393
	if (IS_GEN7(dev_priv)) {
394
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
413
	} else if (IS_GEN6(dev_priv)) {
414
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
415 416
	} else {
		/* XXX: gen8 returns to sanity */
417
		mmio = RING_HWS_PGA(engine->mmio_base);
418 419
	}

420
	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
430
	if (IS_GEN(dev_priv, 6, 7)) {
431
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
432 433

	/* ring should be idle before issuing a sync flush */
434
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
435 436 437 438

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
439 440 441
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
442
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
443
				  engine->name);
444 445 446
	}
}

447
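/* On gen3+ ask the engine to stop (STOP_RING in MI_MODE), wait for it to
 * report idle, then clear the ring registers. Returns true if HEAD was
 * successfully reset to zero.
 */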
static bool stop_ring(struct intel_engine_cs *engine)
448
{
449
	struct drm_i915_private *dev_priv = engine->i915;
450

451
	if (INTEL_GEN(dev_priv) > 2) {
452
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
458 459
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
460 461 462 463
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
464
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
465
				return false;
466 467
		}
	}
468

469 470
	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
471
	I915_WRITE_TAIL(engine, 0);
472

473
	if (INTEL_GEN(dev_priv) > 2) {
474 475
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
476
	}
477

478
	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
479
}
480

481
static int init_ring_common(struct intel_engine_cs *engine)
482
{
483
	struct drm_i915_private *dev_priv = engine->i915;
484
	struct intel_ring *ring = engine->buffer;
485 486
	int ret = 0;

487
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
488

489
	if (!stop_ring(engine)) {
490
		/* G45 ring initialization often fails to reset head to zero */
491 492
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));
498

499
		if (!stop_ring(engine)) {
500 501
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
507 508
			ret = -EIO;
			goto out;
509
		}
510 511
	}

512
	if (HWS_NEEDS_PHYSICAL(dev_priv))
513
		ring_setup_phys_status_page(engine);
514 515
	else
		intel_ring_setup_status_page(engine);
516

517
	intel_engine_reset_breadcrumbs(engine);
518

519
	/* Enforce ordering by reading HEAD register back */
520
	I915_READ_HEAD(engine);
521

522 523 524 525
	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
526
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
527 528

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
529
	if (I915_READ_HEAD(engine))
530
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
531
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);
537

538
	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
539 540

	/* If the head is still not zero, the ring is dead */
541 542 543
	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
				       RING_VALID, RING_VALID,
				       50)) {
544
		DRM_ERROR("%s initialization failed "
545
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
546 547 548
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
549 550
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
551
			  I915_READ_START(engine),
552
			  i915_ggtt_offset(ring->vma));
553 554
		ret = -EIO;
		goto out;
555 556
	}

557
	intel_engine_init_hangcheck(engine);
558

559
out:
560
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
561 562

	return ret;
563 564
}

565 566 567
static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	/* Try to restore the logical GPU state to match the continuation
	 * of the request queue. If we skip the context/PD restore, then
	 * the next request may try to execute assuming that its context
	 * is valid and loaded on the GPU and so may try to access invalid
	 * memory, prompting repeated GPU hangs.
	 *
	 * If the request was guilty, we still restore the logical state
	 * in case the next request requires it (e.g. the aliasing ppgtt),
	 * but skip over the hung batch.
	 *
	 * If the request was innocent, we try to replay the request with
	 * the restored context.
	 */
	if (request) {
		struct drm_i915_private *dev_priv = request->i915;
		struct intel_context *ce = &request->ctx->engine[engine->id];
		struct i915_hw_ppgtt *ppgtt;

		/* FIXME consider gen8 reset */

		if (ce->state) {
			I915_WRITE(CCID,
				   i915_ggtt_offset(ce->state) |
				   BIT(8) /* must be set! */ |
				   CCID_EXTENDED_STATE_SAVE |
				   CCID_EXTENDED_STATE_RESTORE |
				   CCID_EN);
		}

		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
		if (ppgtt) {
			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;

			I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
			I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);

			/* Wait for the PD reload to complete */
			if (intel_wait_for_register(dev_priv,
						    RING_PP_DIR_BASE(engine),
						    BIT(0), 0,
						    10))
				DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
610

611 612 613 614
			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		/* If the rq hung, jump to its breadcrumb and skip the batch */
615 616
		if (request->fence.error == -EIO)
			request->ring->head = request->postfix;
617 618 619
	} else {
		engine->legacy_active_context = NULL;
	}
620 621
}

622
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
623 624 625
{
	int ret;

626
	ret = intel_ring_workarounds_emit(req);
627 628 629
	if (ret != 0)
		return ret;

630
	ret = i915_gem_render_state_emit(req);
631
	if (ret)
632
		return ret;
633

634
	return 0;
635 636
}

637
static int init_render_ring(struct intel_engine_cs *engine)
638
{
639
	struct drm_i915_private *dev_priv = engine->i915;
640
	int ret = init_ring_common(engine);
641 642
	if (ret)
		return ret;
643

644
	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
645
	if (IS_GEN(dev_priv, 4, 6))
646
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
647 648 649 650

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
651
	 *
652
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
653
	 */
654
	if (IS_GEN(dev_priv, 6, 7))
655 656
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

657
	/* Required for the hardware to program scanline values for waiting */
658
	/* WaEnableFlushTlbInvalidationMode:snb */
659
	if (IS_GEN6(dev_priv))
660
		I915_WRITE(GFX_MODE,
661
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
662

663
	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
664
	if (IS_GEN7(dev_priv))
665
		I915_WRITE(GFX_MODE_GEN7,
666
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
667
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
668

669
	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
676
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
677 678
	}

679
	if (IS_GEN(dev_priv, 6, 7))
680
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
681

682 683
	if (INTEL_INFO(dev_priv)->gen >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
684

685
	return init_workarounds_ring(engine);
686 687
}

688
static void render_ring_cleanup(struct intel_engine_cs *engine)
689
{
690
	struct drm_i915_private *dev_priv = engine->i915;
691

692
	i915_vma_unpin_and_release(&dev_priv->semaphore);
693 694
}

695
static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
696
{
697
	struct drm_i915_private *dev_priv = req->i915;
698
	struct intel_engine_cs *waiter;
699
	enum intel_engine_id id;
700

701
	for_each_engine(waiter, dev_priv, id) {
702
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
703 704 705
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_CS_STALL;
		*cs++ = lower_32_bits(gtt_offset);
		*cs++ = upper_32_bits(gtt_offset);
		*cs++ = req->global_seqno;
		*cs++ = 0;
		*cs++ = MI_SEMAPHORE_SIGNAL |
			MI_SEMAPHORE_TARGET(waiter->hw_id);
		*cs++ = 0;
716 717
	}

718
	return cs;
719 720
}

721
static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
722
{
723
	struct drm_i915_private *dev_priv = req->i915;
724
	struct intel_engine_cs *waiter;
725
	enum intel_engine_id id;
726

727
	for_each_engine(waiter, dev_priv, id) {
728
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
729 730 731
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
		*cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
		*cs++ = upper_32_bits(gtt_offset);
		*cs++ = req->global_seqno;
		*cs++ = MI_SEMAPHORE_SIGNAL |
			MI_SEMAPHORE_TARGET(waiter->hw_id);
		*cs++ = 0;
739 740
	}

741
	return cs;
742 743
}

744
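/* Write the request's global seqno into the signal mailbox register of each
 * of the other engines using MI_LOAD_REGISTER_IMM; a trailing MI_NOOP pads
 * the stream whenever an odd number of mailboxes was written.
 */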
static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
745
{
746
	struct drm_i915_private *dev_priv = req->i915;
747
	struct intel_engine_cs *engine;
748
	enum intel_engine_id id;
	int num_rings = 0;
750

751
	for_each_engine(engine, dev_priv, id) {
752 753 754 755
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;
756

757
		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
758
		if (i915_mmio_reg_valid(mbox_reg)) {
759 760 761
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(mbox_reg);
			*cs++ = req->global_seqno;
			num_rings++;
763 764
		}
	}
	if (num_rings & 1)
766
		*cs++ = MI_NOOP;
767

768
	return cs;
769 770
}

771 772 773 774
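/* Legacy ringbuffer submission: mark the request as submitted, then write
 * its tail offset into the engine's TAIL register to kick off execution.
 */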
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

775 776
	i915_gem_request_submit(request);

777
	assert_ring_tail_valid(request->ring, request->tail);
	I915_WRITE_TAIL(request->engine, request->tail);
779 780
}

781
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
782
{
783 784 785 786
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
	*cs++ = req->global_seqno;
	*cs++ = MI_USER_INTERRUPT;
787

788
	req->tail = intel_ring_offset(req, cs);
789
	assert_ring_tail_valid(req->ring, req->tail);
790 791
}

792 793
static const int i9xx_emit_breadcrumb_sz = 4;

794
/**
795
 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
 *
 * @req - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
802
static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
803
{
	return i9xx_emit_breadcrumb(req,
805
				    req->engine->semaphore.signal(req, cs));
806 807
}

static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
809
					u32 *cs)
810 811
{
	struct intel_engine_cs *engine = req->engine;
812

	if (engine->semaphore.signal)
		cs = engine->semaphore.signal(req, cs);

	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_hws_seqno_address(engine);
	*cs++ = 0;
	*cs++ = req->global_seqno;
822
	/* We're thrashing one dword of HWS. */
823 824 825
	*cs++ = 0;
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;
826

827
	req->tail = intel_ring_offset(req, cs);
828
	assert_ring_tail_valid(req->ring, req->tail);
829 830
}

831 832
static const int gen8_render_emit_breadcrumb_sz = 8;

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
840 841

static int
842 843
gen8_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
844
{
845 846
	struct drm_i915_private *dev_priv = req->i915;
	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
847
	struct i915_hw_ppgtt *ppgtt;
848
	u32 *cs;
849

850 851 852
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
853

	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = signal->global_seqno;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	intel_ring_advance(req, cs);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
866 867 868
	ppgtt = req->ctx->ppgtt;
	if (ppgtt && req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
869 870 871
	return 0;
}

872
static int
873 874
gen6_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
875
{
876 877 878
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
879
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
880
	u32 *cs;
881

882
	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
883

884 885 886
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
887

888
	*cs++ = dw1 | wait_mbox;
889 890 891 892
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
893 894 895 896
	*cs++ = signal->global_seqno - 1;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
897 898 899 900

	return 0;
}

901
static void
902
gen5_seqno_barrier(struct intel_engine_cs *engine)
903
{
904 905 906
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
907
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
915
	 */
916
	usleep_range(125, 250);
917 918
}

919 920
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
921
{
922
	struct drm_i915_private *dev_priv = engine->i915;
923

924 925
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
935 936 937
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
938
	 */
939
	spin_lock_irq(&dev_priv->uncore.lock);
940
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
941
	spin_unlock_irq(&dev_priv->uncore.lock);
942 943
}

944 945
static void
gen5_irq_enable(struct intel_engine_cs *engine)
946
{
947
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
948 949 950
}

static void
951
gen5_irq_disable(struct intel_engine_cs *engine)
952
{
953
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
954 955
}

956 957
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
958
{
959
	struct drm_i915_private *dev_priv = engine->i915;
960

961 962 963
	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
964 965
}

966
static void
967
i9xx_irq_disable(struct intel_engine_cs *engine)
968
{
969
	struct drm_i915_private *dev_priv = engine->i915;
970

971 972
	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
973 974
}

975 976
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
978
	struct drm_i915_private *dev_priv = engine->i915;

980 981 982
	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
986
i8xx_irq_disable(struct intel_engine_cs *engine)
{
988
	struct drm_i915_private *dev_priv = engine->i915;

990 991
	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

994
static int
995
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
996
{
997
	u32 *cs;
998

999 1000 1001
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1002

1003 1004 1005
	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
1006
	return 0;
1007 1008
}

1009 1010
static void
gen6_irq_enable(struct intel_engine_cs *engine)
1011
{
1012
	struct drm_i915_private *dev_priv = engine->i915;
1013

1014 1015 1016
	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
1017
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1018 1019 1020
}

static void
1021
gen6_irq_disable(struct intel_engine_cs *engine)
1022
{
1023
	struct drm_i915_private *dev_priv = engine->i915;
1024

1025
	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1026
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1027 1028
}

1029 1030
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
1032
	struct drm_i915_private *dev_priv = engine->i915;

1034
	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1035
	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
1039
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
1041
	struct drm_i915_private *dev_priv = engine->i915;

1043
	I915_WRITE_IMR(engine, ~0);
1044
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}

1047 1048
static void
gen8_irq_enable(struct intel_engine_cs *engine)
1049
{
1050
	struct drm_i915_private *dev_priv = engine->i915;
1051

1052 1053 1054
	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
1055
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
1056 1057 1058
}

static void
1059
gen8_irq_disable(struct intel_engine_cs *engine)
1060
{
1061
	struct drm_i915_private *dev_priv = engine->i915;
1062

1063
	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1064 1065
}

1066
static int
1067 1068 1069
i965_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
1070
{
1071
	u32 *cs;
1072

1073 1074 1075
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1076

1077 1078 1079 1080
	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(req, cs);
1081

1082 1083 1084
	return 0;
}

1085 1086
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
1087 1088
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1089
static int
1090 1091 1092
i830_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
1093
{
1094
	u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
1095

1096 1097 1098
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1099

1100
	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
1108

1109
	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1110 1111 1112
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

1113 1114 1115
		cs = intel_ring_begin(req, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(req, cs);
1131 1132

		/* ... and execute it. */
1133
		offset = cs_offset;
1134
	}
1135

1136 1137 1138
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1139

1140 1141 1142 1143
	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(req, cs);
1144

1145 1146 1147 1148
	return 0;
}

static int
1149 1150 1151
i915_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
1152
{
1153
	u32 *cs;
1154

1155 1156 1157
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1158

1159 1160 1161 1162
	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(req, cs);
1163 1164 1165 1166

	return 0;
}

1167
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1168
{
1169
	struct drm_i915_private *dev_priv = engine->i915;
1170 1171 1172 1173

	if (!dev_priv->status_page_dmah)
		return;

1174
	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1175
	engine->status_page.page_addr = NULL;
1176 1177
}

1178
static void cleanup_status_page(struct intel_engine_cs *engine)
1179
{
1180
	struct i915_vma *vma;
1181
	struct drm_i915_gem_object *obj;
1182

1183 1184
	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
1185 1186
		return;

1187 1188
	obj = vma->obj;

1189
	i915_vma_unpin(vma);
1190 1191 1192 1193
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
1194 1195
}

1196
static int init_status_page(struct intel_engine_cs *engine)
1197
{
1198 1199 1200
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
1201
	void *vaddr;
1202
	int ret;
1203

1204
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1205 1206 1207 1208
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}
1209

1210 1211 1212
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;
1213

1214
	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1215 1216 1217
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
1218
	}
1219

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;
1236

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

1243
	engine->status_page.vma = vma;
1244
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1245
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
1246

1247 1248
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
1249
	return 0;
1250

1251 1252
err_unpin:
	i915_vma_unpin(vma);
1253 1254 1255
err:
	i915_gem_object_put(obj);
	return ret;
1256 1257
}

1258
static int init_phys_status_page(struct intel_engine_cs *engine)
1259
{
1260
	struct drm_i915_private *dev_priv = engine->i915;
1261

1262 1263
	GEM_BUG_ON(engine->id != RCS);

1264 1265 1266 1267
	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;
1268

1269 1270
	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1271 1272 1273 1274

	return 0;
}

1275 1276 1277
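/* Pin the ring's backing vma into the GGTT (at or above offset_bias when
 * one is given) and map it for CPU writes: through the aperture (WC) when
 * the vma is map-and-fenceable, otherwise via a direct mapping of the pages.
 */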
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias)
1278
{
1279
	enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1280
	struct i915_vma *vma = ring->vma;
1281
	unsigned int flags;
1282
	void *addr;
1283 1284
	int ret;

1285
	GEM_BUG_ON(ring->vaddr);
1286

1287

1288 1289 1290
	flags = PIN_GLOBAL;
	if (offset_bias)
		flags |= PIN_OFFSET_BIAS | offset_bias;
1291
	if (vma->obj->stolen)
1292
		flags |= PIN_MAPPABLE;
1293

1294
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1295
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1296 1297 1298 1299
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
1300
			return ret;
1301
	}
1302

1303 1304 1305
	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;
1306

1307
	if (i915_vma_is_map_and_fenceable(vma))
1308 1309
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
1310
		addr = i915_gem_object_pin_map(vma->obj, map);
1311 1312
	if (IS_ERR(addr))
		goto err;
1313

1314
	ring->vaddr = addr;
1315
	return 0;
1316

1317 1318 1319
err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
1320 1321
}

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

1327
	if (i915_vma_is_map_and_fenceable(ring->vma))
1328
		i915_vma_unpin_iomap(ring->vma);
1329 1330
	else
		i915_gem_object_unpin_map(ring->vma->obj);
1331 1332
	ring->vaddr = NULL;

1333
	i915_vma_unpin(ring->vma);
1334 1335
}

1336 1337
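/* Allocate the backing storage for a ring: prefer stolen memory, fall back
 * to a regular shmem object, and mark it read-only to the GPU.
 */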
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1338
{
1339
	struct drm_i915_gem_object *obj;
1340
	struct i915_vma *vma;
1341

1342
	obj = i915_gem_object_create_stolen(dev_priv, size);
1343
	if (!obj)
1344
		obj = i915_gem_object_create(dev_priv, size);
1345 1346
	if (IS_ERR(obj))
		return ERR_CAST(obj);
1347

1348 1349 1350
	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

1351
	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1352 1353 1354 1355
	if (IS_ERR(vma))
		goto err;

	return vma;
1356

1357 1358 1359
err:
	i915_gem_object_put(obj);
	return vma;
1360 1361
}

1362 1363
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1364
{
1365
	struct intel_ring *ring;
1366
	struct i915_vma *vma;
1367

1368
	GEM_BUG_ON(!is_power_of_2(size));
1369
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1370

1371
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1372
	if (!ring)
1373 1374
		return ERR_PTR(-ENOMEM);

1375 1376
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
1383
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1384 1385 1386 1387
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

1388 1389
	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
1390
		kfree(ring);
1391
		return ERR_CAST(vma);
1392
	}
1393
	ring->vma = vma;

	return ring;
}

void
1399
intel_ring_free(struct intel_ring *ring)
1400
{
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

1406 1407 1408
	kfree(ring);
}

1409
static int context_pin(struct i915_gem_context *ctx)
{
	struct i915_vma *vma = ctx->engine[RCS].state;
	int ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
		if (ret)
			return ret;
	}

1424 1425
	return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
			    PIN_GLOBAL | PIN_HIGH);
}

static int intel_ring_context_pin(struct intel_engine_cs *engine,
				  struct i915_gem_context *ctx)
1430 1431 1432 1433
{
	struct intel_context *ce = &ctx->engine[engine->id];
	int ret;

1434
	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1435 1436 1437

	if (ce->pin_count++)
		return 0;
1438
	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1439 1440

	if (ce->state) {
1441
		ret = context_pin(ctx);
1442
		if (ret)
1443
			goto error;
1444 1445

		ce->state->obj->mm.dirty = true;
1446 1447
	}

	/* The kernel context is only used as a placeholder for flushing the
	 * active context. It is never used for submitting user rendering and
	 * as such never requires the golden render context, and so we can skip
	 * emitting it when we switch to the kernel context. This is required
	 * as during eviction we cannot allocate and pin the renderstate in
	 * order to initialise the context.
	 */
1455
	if (i915_gem_context_is_kernel(ctx))
1456 1457
		ce->initialised = true;

1458
	i915_gem_context_get(ctx);
	return 0;

error:
	ce->pin_count = 0;
	return ret;
}

1466 1467
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
				     struct i915_gem_context *ctx)
1468 1469 1470
{
	struct intel_context *ce = &ctx->engine[engine->id];

1471
	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1472
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	if (ce->state)
1478
		i915_vma_unpin(ce->state);
1479

1480
	i915_gem_context_put(ctx);
1481 1482
}

1483
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1484
{
1485
	struct intel_ring *ring;
1486
	int err;
1487

1488 1489
	intel_engine_setup_common(engine);

1490 1491 1492
	err = intel_engine_init_common(engine);
	if (err)
		goto err;
1493

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		err = init_phys_status_page(engine);
	else
		err = init_status_page(engine);
	if (err)
		goto err;
1500

1501 1502
	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
1503 1504
		err = PTR_ERR(ring);
		goto err_hws;
1505 1506
	}

1507
	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
1513
	engine->buffer = ring;
1514

1515
	return 0;
1516

err_ring:
	intel_ring_free(ring);
err_hws:
	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);
err:
	intel_engine_cleanup_common(engine);
	return err;
1527 1528
}

1529
void intel_engine_cleanup(struct intel_engine_cs *engine)
1530
{
1531
	struct drm_i915_private *dev_priv = engine->i915;
1532

1533 1534
	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);
1535

1536 1537
	intel_ring_unpin(engine->buffer);
	intel_ring_free(engine->buffer);
1538

1539 1540
	if (engine->cleanup)
		engine->cleanup(engine);

1542
	if (HWS_NEEDS_PHYSICAL(dev_priv))
1543
		cleanup_phys_status_page(engine);
1544
	else
1545
		cleanup_status_page(engine);
1546

1547
	intel_engine_cleanup_common(engine);
1548

1549 1550
	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
1551 1552
}

1553 1554 1555
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
1556
	enum intel_engine_id id;
1557

1558
	for_each_engine(engine, dev_priv, id)
1559 1560 1561
		engine->buffer->head = engine->buffer->tail;
}

1562
static int ring_request_alloc(struct drm_i915_gem_request *request)
1563
{
1564
	u32 *cs;
1565

1566 1567
	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);

1568 1569 1570 1571
	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
1572
	request->reserved_space += LEGACY_REQUEST_SIZE;
1573

1574
	GEM_BUG_ON(!request->engine->buffer);
1575
	request->ring = request->engine->buffer;
1576

1577 1578 1579
	cs = intel_ring_begin(request, 0);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1580

1581
	request->reserved_space -= LEGACY_REQUEST_SIZE;
1582
	return 0;
1583 1584
}

1585 1586
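/* Not enough room in the ring for the next commands: find the oldest
 * request whose retirement would free at least the requested number of
 * bytes, wait for it, and retire everything up to and including it.
 */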
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
1587
	struct intel_ring *ring = req->ring;
1588
	struct drm_i915_gem_request *target;
1589 1590 1591
	long timeout;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
1592

1593 1594
	intel_ring_update_space(ring);
	if (ring->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
1606
	GEM_BUG_ON(!req->reserved_space);
1607

1608
	list_for_each_entry(target, &ring->request_list, ring_link) {
1609 1610 1611
		unsigned space;

		/* Would completion of this request free enough space? */
1612 1613
		space = __intel_ring_space(target->postfix, ring->tail,
					   ring->size);
1614 1615
		if (space >= bytes)
			break;
1616
	}
1617

1618
	if (WARN_ON(&target->ring_link == &ring->request_list))
1619 1620
		return -ENOSPC;

	timeout = i915_wait_request(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
1632 1633
}

1634
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
1636
	struct intel_ring *ring = req->ring;
1637 1638
	int remain_actual = ring->size - ring->tail;
	int remain_usable = ring->effective_size - ring->tail;
1639 1640
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
1641
	bool need_wrap = false;
1642
	u32 *cs;
1643

1644
	total_bytes = bytes + req->reserved_space;
1645

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
1660
		wait_bytes = remain_actual + req->reserved_space;
1661
	} else {
1662 1663
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

1666
	if (wait_bytes > ring->space) {
1667
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
1669
			return ERR_PTR(ret);
	}

1672
	if (unlikely(need_wrap)) {
1673 1674
		GEM_BUG_ON(remain_actual > ring->space);
		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
1675

1676
		/* Fill the tail with MI_NOOP */
1677 1678 1679
		memset(ring->vaddr + ring->tail, 0, remain_actual);
		ring->tail = 0;
		ring->space -= remain_actual;
1680
	}
1681

1682 1683 1684
	GEM_BUG_ON(ring->tail > ring->size - bytes);
	cs = ring->vaddr + ring->tail;
	ring->tail += bytes;
1685 1686
	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);
1687 1688

	return cs;
1689
}
1690

1691
/* Align the ring tail to a cacheline boundary */
1692
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
1693
{
1694
	int num_dwords =
1695 1696
		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	u32 *cs;
1697 1698 1699 1700

	if (num_dwords == 0)
		return 0;

1701
	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
1702 1703 1704
	cs = intel_ring_begin(req, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1705 1706

	while (num_dwords--)
1707
		*cs++ = MI_NOOP;
1708

1709
	intel_ring_advance(req, cs);
1710 1711 1712 1713

	return 0;
}

1714
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
1715
{
1716
	struct drm_i915_private *dev_priv = request->i915;
1717

1718 1719
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

1720
	/* Every tail move must follow the sequence below */
1721 1722 1723 1724

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
1725 1726
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1727 1728

	/* Clear the context id. Here be magic! */
1729
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
1730

1731
	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
1737
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1738

1739
	/* Now that the ring is fully powered up, update the tail */
1740
	i9xx_submit_request(request);
1741 1742 1743 1744

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
1745 1746 1747 1748
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1749 1750
}

1751
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1752
{
1753
	u32 cmd, *cs;
1754

1755 1756 1757
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1758

1759
	cmd = MI_FLUSH_DW;
1760
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
1776
	if (mode & EMIT_INVALIDATE)
1777 1778
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

1779 1780
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1781
	if (INTEL_GEN(req->i915) >= 8) {
1782 1783
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else  {
1785 1786
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
1788
	intel_ring_advance(req, cs);
1789
	return 0;
1790 1791
}

1792
static int
1793 1794 1795
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
1796
{
1797
	bool ppgtt = USES_PPGTT(req->i915) &&
1798
			!(dispatch_flags & I915_DISPATCH_SECURE);
1799
	u32 *cs;
1800

1801 1802 1803
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1804 1805

	/* FIXME(BDW): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
1812 1813 1814 1815

	return 0;
}

1816
static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
		(dispatch_flags & I915_DISPATCH_RS ?
		MI_BATCH_RESOURCE_STREAMER : 0);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}

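/*
 * Plain two-dword MI_BATCH_BUFFER_START for gen6/gen7: the only per-batch
 * choice here is whether to drop the non-secure bit for privileged
 * (secure) dispatches.
 */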
static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */

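/*
 * Blitter counterpart of gen6_bsd_ring_flush() above: the same MI_FLUSH_DW
 * post-sync barrier, but TLB invalidation only needs MI_INVALIDATE_TLB
 * (there is no BSD-specific invalidate bit to set here).
 */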
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);

	return 0;
}

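/*
 * Set up the legacy semaphore hooks. On gen8+ a single GGTT page is lazily
 * allocated and shared by all engines, and each engine records the offset
 * it signals for every other engine; on gen6/7 the per-engine-pair mailbox
 * registers from sem_data are used instead. Any allocation failure simply
 * disables semaphores globally.
 */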
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
		struct i915_vma *vma;

		obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
		if (IS_ERR(obj))
			goto err;

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma))
			goto err_obj;

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			goto err_obj;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto err_obj;

		dev_priv->semaphore = vma;
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		u32 offset = i915_ggtt_offset(dev_priv->semaphore);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u32 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform.  And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID.  Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
				[RCS_HW] = {
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->hw_id) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}

	return;

err_obj:
	i915_gem_object_put(obj);
err:
	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
	i915.semaphores = 0;
}

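/*
 * Pick the user-interrupt enable/disable hooks (and, where needed, the
 * seqno barrier) appropriate for this hardware generation.
 */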
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

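/*
 * Common vfunc setup shared by all legacy ringbuffer engines. Note that
 * enabling semaphores grows the breadcrumb: each additional ring signalled
 * costs 6 dwords on gen8+ or 3 dwords on gen6/7, plus one dword of padding
 * when the number of other rings is odd.
 */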
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->context_pin = intel_ring_context_pin;
	engine->context_unpin = intel_ring_context_unpin;

	engine->request_alloc = ring_request_alloc;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (i915.semaphores) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
		if (INTEL_GEN(dev_priv) >= 8) {
			engine->emit_breadcrumb_sz += num_rings * 6;
		} else {
			engine->emit_breadcrumb_sz += num_rings * 3;
			if (num_rings & 1)
				engine->emit_breadcrumb_sz++;
		}
	}

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

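/*
 * Render engine setup: start from the shared defaults, then override the
 * flush/breadcrumb hooks per generation and, once the ring itself is
 * initialised, allocate the scratch page (or the larger I830 workaround
 * buffer on platforms with the broken CS TLB).
 */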
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores) {
			int num_rings;

			engine->semaphore.signal = gen8_rcs_signal;

			num_rings =
				hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
			engine->emit_breadcrumb_sz += num_rings * 8;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, PAGE_SIZE);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

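/*
 * BSD (video) engine setup. Gen6 additionally swaps in the PSMI-based
 * submit path above to work around the tail-update issue noted below.
 */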
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}