/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

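/*
 * Free space is measured from the consumer (head) to the producer (tail),
 * wrapping around the ring; I915_RING_FREE_SPACE bytes are always kept in
 * reserve so that a completely full ring is never mistaken for an empty one.
 */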
static int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cmd = MI_FLUSH;

	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 flags;
	u32 *cs;

	cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	flags = PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		cs = gen8_emit_pipe_control(cs,
					    PIPE_CONTROL_CS_STALL |
					    PIPE_CONTROL_STALL_AT_SCOREBOARD,
					    0);
	}

	cs = gen8_emit_pipe_control(cs, flags,
				    i915_ggtt_offset(req->engine->scratch) +
				    2 * CACHELINE_BYTES);

	intel_ring_advance(req, cs);

	return 0;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush*/
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

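/*
 * Ask the engine to stop (STOP_RING), wait for it to report idle, then clear
 * the ring registers; returns true once HEAD has been reset to zero.
 */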
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
				       RING_VALID, RING_VALID,
				       50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	/* Try to restore the logical GPU state to match the continuation
	 * of the request queue. If we skip the context/PD restore, then
	 * the next request may try to execute assuming that its context
	 * is valid and loaded on the GPU and so may try to access invalid
	 * memory, prompting repeated GPU hangs.
	 *
	 * If the request was guilty, we still restore the logical state
	 * in case the next request requires it (e.g. the aliasing ppgtt),
	 * but skip over the hung batch.
	 *
	 * If the request was innocent, we try to replay the request with
	 * the restored context.
	 */
	if (request) {
		struct drm_i915_private *dev_priv = request->i915;
		struct intel_context *ce = &request->ctx->engine[engine->id];
		struct i915_hw_ppgtt *ppgtt;

		/* FIXME consider gen8 reset */

		if (ce->state) {
			I915_WRITE(CCID,
				   i915_ggtt_offset(ce->state) |
				   BIT(8) /* must be set! */ |
				   CCID_EXTENDED_STATE_SAVE |
				   CCID_EXTENDED_STATE_RESTORE |
				   CCID_EN);
		}

		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
		if (ppgtt) {
			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;

			I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
			I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);

			/* Wait for the PD reload to complete */
			if (intel_wait_for_register(dev_priv,
						    RING_PP_DIR_BASE(engine),
						    BIT(0), 0,
						    10))
				DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		/* If the rq hung, jump to its breadcrumb and skip the batch */
		if (request->fence.error == -EIO) {
			struct intel_ring *ring = request->ring;

			ring->head = request->postfix;
			ring->last_retired_head = -1;
		}
	} else {
		engine->legacy_active_context = NULL;
	}
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_emit(req);
	if (ret)
		return ret;

	return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_INFO(dev_priv)->gen >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	i915_vma_unpin_and_release(&dev_priv->semaphore);
}

static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;

	for_each_engine(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_CS_STALL;
		*cs++ = lower_32_bits(gtt_offset);
		*cs++ = upper_32_bits(gtt_offset);
		*cs++ = req->global_seqno;
		*cs++ = 0;
		*cs++ = MI_SEMAPHORE_SIGNAL |
			MI_SEMAPHORE_TARGET(waiter->hw_id);
		*cs++ = 0;
	}

	return cs;
}

static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;

	for_each_engine(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
		*cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
		*cs++ = upper_32_bits(gtt_offset);
		*cs++ = req->global_seqno;
		*cs++ = MI_SEMAPHORE_SIGNAL |
			MI_SEMAPHORE_TARGET(waiter->hw_id);
		*cs++ = 0;
	}

	return cs;
}

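/*
 * Write the request's global seqno into the signalling mailbox register of
 * every other engine, so that a MI_SEMAPHORE_MBOX wait on that engine can
 * compare against it and proceed.
 */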
static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int num_rings = 0;

	for_each_engine(engine, dev_priv, id) {
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;

		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
		if (i915_mmio_reg_valid(mbox_reg)) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(mbox_reg);
			*cs++ = req->global_seqno;
			num_rings++;
		}
	}
	if (num_rings & 1)
		*cs++ = MI_NOOP;

	return cs;
}

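/*
 * Submission on the legacy rings is simply a matter of publishing the new
 * TAIL pointer; the hardware executes whatever lies between HEAD and TAIL.
 */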
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_gem_request_submit(request);

	GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
	I915_WRITE_TAIL(request->engine, request->tail);
}

static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
	*cs++ = req->global_seqno;
	*cs++ = MI_USER_INTERRUPT;

	req->tail = intel_ring_offset(req, cs);
	GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
}

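/* Must match the number of dwords emitted by i9xx_emit_breadcrumb() above. */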
static const int i9xx_emit_breadcrumb_sz = 4;

/**
 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
	return i9xx_emit_breadcrumb(req,
				    req->engine->semaphore.signal(req, cs));
}

static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
					u32 *cs)
{
	struct intel_engine_cs *engine = req->engine;

	if (engine->semaphore.signal)
		cs = engine->semaphore.signal(req, cs);

	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_hws_seqno_address(engine);
	*cs++ = 0;
	*cs++ = req->global_seqno;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	req->tail = intel_ring_offset(req, cs);
	GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
}

static const int gen8_render_emit_breadcrumb_sz = 8;

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct drm_i915_private *dev_priv = req->i915;
	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
	struct i915_hw_ppgtt *ppgtt;
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = signal->global_seqno;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	intel_ring_advance(req, cs);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
	ppgtt = req->ctx->ppgtt;
	if (ppgtt && req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
	return 0;
}

static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
	u32 *cs;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = dw1 | wait_mbox;
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	*cs++ = signal->global_seqno - 1;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
}

1076
static int
1077 1078 1079
i965_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
1080
{
1081
	u32 *cs;
1082

1083 1084 1085
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1086

1087 1088 1089 1090
	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(req, cs);
1091

1092 1093 1094
	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(req, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(req, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(req, cs);

	return 0;
}

static int
i915_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(req, cs);

	return 0;
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

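/*
 * The hardware status page (HWS) is a single page the GPU writes seqnos and
 * other status into; allocate it, pin it into the GGTT and keep a permanent
 * CPU mapping of it.
 */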
static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

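/*
 * Pin the ring buffer into the GGTT and map it for CPU writes: through the
 * GTT aperture when it is mappable (e.g. placed in stolen memory), otherwise
 * via a kernel mapping of the backing pages.
 */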
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
{
	unsigned int flags;
	enum i915_map_type map;
	struct i915_vma *vma = ring->vma;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;

	flags = PIN_GLOBAL;
	if (offset_bias)
		flags |= PIN_OFFSET_BIAS | offset_bias;
	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			return ret;
	}

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr))
		goto err;

	ring->vaddr = addr;
	return 0;

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	i915_vma_unpin(ring->vma);
}

static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (!obj)
		obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	ring->engine = engine;

	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void
intel_ring_free(struct intel_ring *ring)
{
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

	kfree(ring);
}

static int context_pin(struct i915_gem_context *ctx)
{
	struct i915_vma *vma = ctx->engine[RCS].state;
	int ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
		if (ret)
			return ret;
	}

	return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | PIN_HIGH);
}

static int intel_ring_context_pin(struct intel_engine_cs *engine,
				  struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	if (ce->state) {
		ret = context_pin(ctx);
		if (ret)
			goto error;
	}

	/* The kernel context is only used as a placeholder for flushing the
	 * active context. It is never used for submitting user rendering and
	 * as such never requires the golden render context, and so we can skip
	 * emitting it when we switch to the kernel context. This is required
	 * as during eviction we cannot allocate and pin the renderstate in
	 * order to initialise the context.
	 */
	if (i915_gem_context_is_kernel(ctx))
		ce->initialised = true;

	i915_gem_context_get(ctx);
	return 0;

error:
	ce->pin_count = 0;
	return ret;
}

static void intel_ring_context_unpin(struct intel_engine_cs *engine,
				     struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	if (ce->state)
		i915_vma_unpin(ce->state);

	i915_gem_context_put(ctx);
}

static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring;
	int ret;

	WARN_ON(engine->buffer);

	intel_engine_setup_common(engine);

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error;
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
		WARN_ON(engine->id != RCS);
		ret = init_phys_status_page(engine);
		if (ret)
			goto error;
	} else {
		ret = init_status_page(engine);
		if (ret)
			goto error;
	}

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
	if (ret) {
		intel_ring_free(ring);
		goto error;
	}
	engine->buffer = ring;

	return 0;

error:
	intel_engine_cleanup(engine);
	return ret;
}

void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	dev_priv = engine->i915;

	if (engine->buffer) {
		WARN_ON(INTEL_GEN(dev_priv) > 2 &&
			(I915_READ_MODE(engine) & MODE_IDLE) == 0);

		intel_ring_unpin(engine->buffer);
		intel_ring_free(engine->buffer);
		engine->buffer = NULL;
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
		WARN_ON(engine->id != RCS);
		cleanup_phys_status_page(engine);
	} else {
		cleanup_status_page(engine);
	}

	intel_engine_cleanup_common(engine);

	engine->i915 = NULL;
	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}

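/*
 * After a GPU reset the rings are empty: bring the software bookkeeping back
 * in sync by treating everything up to TAIL as already consumed.
 */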
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		engine->buffer->head = engine->buffer->tail;
		engine->buffer->last_retired_head = -1;
	}
}

static int ring_request_alloc(struct drm_i915_gem_request *request)
{
	u32 *cs;

	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	GEM_BUG_ON(!request->engine->buffer);
	request->ring = request->engine->buffer;

	cs = intel_ring_begin(request, 0);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

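/*
 * Walk the requests still occupying the ring and wait for the oldest one
 * whose retirement frees at least @bytes of space.
 */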
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_gem_request *target;
	long timeout;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	intel_ring_update_space(ring);
	if (ring->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &ring->request_list, ring_link) {
		unsigned space;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ring->tail,
					   ring->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	timeout = i915_wait_request(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

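/*
 * Reserve space for @num_dwords in the ring, wrapping back to the start (and
 * padding the remainder of the buffer with MI_NOOPs) if the request would
 * straddle the end. Returns a pointer at which to write the commands.
 */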
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ring *ring = req->ring;
	int remain_actual = ring->size - ring->tail;
	int remain_usable = ring->effective_size - ring->tail;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;
	u32 *cs;

	total_bytes = bytes + req->reserved_space;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
		wait_bytes = remain_actual + req->reserved_space;
	} else {
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

	if (wait_bytes > ring->space) {
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ring->space);
		GEM_BUG_ON(ring->tail + remain_actual > ring->size);

		/* Fill the tail with MI_NOOP */
		memset(ring->vaddr + ring->tail, 0, remain_actual);
		ring->tail = 0;
		ring->space -= remain_actual;
	}

	GEM_BUG_ON(ring->tail > ring->size - bytes);
	cs = ring->vaddr + ring->tail;
	ring->tail += bytes;
	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);

	return cs;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	int num_dwords =
		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	u32 *cs;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	cs = intel_ring_begin(req, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	while (num_dwords--)
		*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	return 0;
}

static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

       /* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_BSD_SLEEP_PSMI_CONTROL,
				       GEN6_BSD_SLEEP_INDICATOR,
				       0,
				       50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else  {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);
	return 0;
}

static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	bool ppgtt = USES_PPGTT(req->i915) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* FIXME(BDW): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
		(dispatch_flags & I915_DISPATCH_RS ?
		MI_BATCH_RESOURCE_STREAMER : 0);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}

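/* gen6/gen7 batch start: only the non-secure bit is selectable here. */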
static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */

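/*
 * MI_FLUSH_DW for the blitter ring; same post-sync barrier as the BSD
 * variant, but TLB invalidation uses only MI_INVALIDATE_TLB.
 */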
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);

	return 0;
}

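/*
 * Set up the inter-engine semaphore hooks. On gen8+ this allocates (once)
 * and pins a GGTT page holding the per-engine signal slots; if that
 * allocation fails, semaphores are disabled altogether.
 */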
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
		struct i915_vma *vma;

		obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
		if (IS_ERR(obj))
			goto err;

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma))
			goto err_obj;

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			goto err_obj;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto err_obj;

		dev_priv->semaphore = vma;
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		u32 offset = i915_ggtt_offset(dev_priv->semaphore);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u32 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform.  And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID.  Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
				[RCS_HW] = {
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->hw_id) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}

	return;

err_obj:
	i915_gem_object_put(obj);
err:
	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
	i915.semaphores = 0;
}

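/*
 * Pick the user-interrupt enable/disable (and, where needed, the seqno
 * barrier) callbacks appropriate to the hardware generation.
 */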
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

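/*
 * Default vfuncs shared by all legacy ringbuffer engines; the per-engine
 * init functions below override these where the hardware differs.
 */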
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->context_pin = intel_ring_context_pin;
	engine->context_unpin = intel_ring_context_unpin;

	engine->request_alloc = ring_request_alloc;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (i915.semaphores) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
		if (INTEL_GEN(dev_priv) >= 8) {
			engine->emit_breadcrumb_sz += num_rings * 6;
		} else {
			engine->emit_breadcrumb_sz += num_rings * 3;
			if (num_rings & 1)
				engine->emit_breadcrumb_sz++;
		}
	}
	engine->submit_request = i9xx_submit_request;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

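/*
 * Render (RCS) engine setup: overrides the default flush/breadcrumb hooks
 * per generation and allocates the scratch page the render ring requires.
 */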
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores) {
			int num_rings;

			engine->semaphore.signal = gen8_rcs_signal;

			num_rings =
				hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
			engine->emit_breadcrumb_sz += num_rings * 6;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, PAGE_SIZE);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

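/* Video (BSD) engine setup; the else branch covers gen5 and earlier parts. */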
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->submit_request = gen6_bsd_submit_request;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

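/* Blitter (BCS) engine setup (SandyBridge and later). */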
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

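/*
 * Video enhancement (VECS) engine setup; pre-gen8 parts use the Haswell
 * PM-interrupt based enable/disable hooks.
 */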
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}