/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}
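
/*
 * Worked example (illustrative, not part of the original source): with
 * a 4096-byte ring, head == 512 and tail == 3584 give space = 512 -
 * 3584 = -3072, which wraps to -3072 + 4096 = 1024 bytes; the caller
 * may then consume 1024 - I915_RING_FREE_SPACE bytes before the tail
 * would run into the head.
 */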

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}
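
/*
 * Note (assumption, not from the original source): head is masked with
 * HEAD_ADDR above because the hardware HEAD register appears to carry
 * non-address bits (such as the wrap count) in its upper bits; only
 * the address portion is meaningful for the space calculation.
 */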

bool intel_engine_stopped(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}

static void __intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_engine_stopped(engine))
		return;
	engine->write_tail(engine, ringbuf->tail);
}
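
/*
 * Note (illustrative): the tail &= size - 1 above relies on the ring
 * size being a power of two, so the AND acts as a cheap modulo; e.g.
 * with size == 4096, a tail of 4100 wraps to 4100 & 4095 == 4.
 */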

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32	invalidate_domains,
		       u32	flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32	invalidate_domains,
		       u32	flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_device *dev = engine->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(engine, 0); /* low dword */
	intel_ring_emit(engine, 0); /* high dword */
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = 0;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = 0;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *engine,
			    u32 value)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	I915_WRITE_TAIL(engine, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(engine->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_INFO(engine->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(engine->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7; it is listed here
		 * only to silence the gcc switch check warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(engine->dev)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = to_i915(engine->dev);

	if (!IS_GEN2(engine->dev)) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	engine->write_tail(engine, 0);

	if (!IS_GEN2(engine->dev)) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = engine->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(engine);
	else
		ring_setup_phys_status_page(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));
	I915_WRITE_HEAD(engine, 0);
	(void)I915_READ_HEAD(engine);

	I915_WRITE_CTL(engine,
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
			  I915_READ_START(engine),
			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ringbuf->last_retired_head = -1;
	ringbuf->head = I915_READ_HEAD(engine);
	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
	intel_ring_update_space(ringbuf);

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void
intel_fini_pipe_control(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;

	if (engine->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(engine->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(engine->scratch.obj);
	}

	drm_gem_object_unreference(&engine->scratch.obj->base);
	engine->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *engine)
{
	int ret;

	WARN_ON(engine->scratch.obj);

	engine->scratch.obj = i915_gem_object_create(engine->dev, 4096);
	if (IS_ERR(engine->scratch.obj)) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = PTR_ERR(engine->scratch.obj);
		engine->scratch.obj = NULL;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(engine->scratch.obj,
					      I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
	engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
	if (engine->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, engine->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(engine->scratch.obj);
err_unref:
	drm_gem_object_unreference(&engine->scratch.obj->base);
err:
	return ret;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(engine, w->reg[i].addr);
		intel_ring_emit(engine, w->reg[i].value);
	}
	intel_ring_emit(engine, MI_NOOP);

	intel_ring_advance(engine);

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
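
/*
 * Usage sketch (illustrative, not from the original source), assuming
 * the usual masked-register convention of carrying the write-enable
 * mask in the upper 16 bits and the value in the lower 16 bits:
 *
 *	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 *			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 *
 * records one register/value pair that intel_ring_workarounds_emit()
 * later replays via MI_LOAD_REGISTER_IMM. Note that WA_REG returns
 * from the enclosing function on error, so these macros may only be
 * used inside a function returning int with a dev_priv in scope.
 */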

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;
	int ret;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt */
	/* WaDisablePartialInstShootdown:skl,bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt */
	/* WaDisablePartialResolveInVc:skl,bxt */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}
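
/*
 * Worked example (illustrative): if subslice_7eu[1] == 0b0100, exactly
 * one subslice (index 2) of slice 1 has 7 EUs, so ss == ffs(0b0100) - 1
 * == 2 and vals[1] == 3 - 2 == 1, which is then programmed into the
 * GEN9_IZ_HASHING field for that slice above.
 */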

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	}

	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}

	/* WaDisablePowerCompilerClockGating:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

	/* This is tied to WaForceContextSaveRestoreNonCoherent */
	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
		/*
		 * Use Force Non-Coherent whenever executing a 3D context. This
		 * is a workaround for a possible hang in the unlikely event
		 * a TLB invalidation occurs during a PSD flush.
		 */
		/* WaForceEnableNonCoherent:skl */
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FORCE_NON_COHERENT);

		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);
	}

	/* WaBarrierPerformanceFixDisable:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, BXT_WA_L3SQCREG1_DEFAULT);

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

	if (IS_BROADWELL(dev))
		return bdw_init_workarounds(engine);

	if (IS_CHERRYVIEW(dev))
		return chv_init_workarounds(engine);

	if (IS_SKYLAKE(dev))
		return skl_init_workarounds(engine);

	if (IS_BROXTON(dev))
		return bxt_init_workarounds(engine);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));

	return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(engine);
}

static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}
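
/*
 * Dword accounting (illustrative): MBOX_UPDATE_DWORDS is 8 because
 * each signalled waiter costs exactly the eight dwords emitted in the
 * loop above (six for the PIPE_CONTROL qword write plus the
 * MI_SEMAPHORE_SIGNAL pair), so e.g. with 4 rings the request grows by
 * (4 - 1) * 8 = 24 dwords.
 */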

static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
{
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	enum intel_engine_id id;
	int ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(useless, dev_priv, id) {
		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];

		if (i915_mmio_reg_valid(mbox_reg)) {
			u32 seqno = i915_gem_request_get_seqno(signaller_req);

			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit_reg(signaller, mbox_reg);
			intel_ring_emit(signaller, seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}
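
/*
 * Alignment note (illustrative): each mailbox update is 3 dwords, so
 * the round_up() above keeps the reservation even, matching the
 * requirement that the ring tail stay qword-aligned; e.g. with 4
 * rings, (4 - 1) * 3 = 9 dwords round up to 10, and the trailing
 * MI_NOOP supplies the tenth dword.
 */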

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @req - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (engine->semaphore.signal)
		ret = engine->semaphore.signal(req, 4);
	else
		ret = intel_ring_begin(req, 4);

	if (ret)
		return ret;

	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
	intel_ring_emit(engine,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	__intel_ring_advance(engine);

	return 0;
}

static int
gen8_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (engine->semaphore.signal)
		ret = engine->semaphore.signal(req, 8);
	else
		ret = intel_ring_begin(req, 8);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_CS_STALL |
				 PIPE_CONTROL_QW_WRITE));
	intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	/* We're thrashing one dword of HWS. */
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	intel_ring_emit(engine, MI_NOOP);
	__intel_ring_advance(engine);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}
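
/*
 * Worked example (illustrative): to wait until seqno 100 has executed
 * on the signaller, the waiter emits 99 as the semaphore operand;
 * since the hardware comparison is strictly greater-than, the wait
 * completes once the signaller's mailbox reaches 100.
 */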

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)

static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(req, 32);
	if (ret)
		return ret;

	intel_ring_emit(engine,
			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(engine,
			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, 0);
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);

	intel_ring_emit(engine,
			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(engine,
			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, 0);
	__intel_ring_advance(engine);

	return 0;
}
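
/*
 * Layout note (illustrative, assuming 64-byte cachelines): the six
 * workaround flushes above land at scratch-page offsets 128, 256, 384,
 * 512, 640 and 768, each 2 * CACHELINE_BYTES past the previous one, so
 * no two of the qword writes share a cacheline.
 */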

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static u32
ring_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *engine)
{
	return engine->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	engine->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
1674
gen5_ring_put_irq(struct intel_engine_cs *engine)
1675
{
1676
	struct drm_device *dev = engine->dev;
1677
	struct drm_i915_private *dev_priv = dev->dev_private;
1678
	unsigned long flags;
1679

1680
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1681 1682
	if (--engine->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1683
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1684 1685
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~engine->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		dev_priv->irq_mask |= engine->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

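/* gen2 has only a 16-bit IMR, hence the I915_WRITE16 variants below. */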
static bool
i8xx_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~engine->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		dev_priv->irq_mask |= engine->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_FLUSH);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);
	return 0;
}

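/* Emit the breadcrumb for a request: store the new seqno into the
 * status page and raise MI_USER_INTERRUPT to wake any waiters.
 */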
static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
	intel_ring_emit(engine,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	__intel_ring_advance(engine);

	return 0;
}

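/* gen6+ engines have a per-engine IMR on top of the shared GT enable;
 * on RCS the L3 parity interrupt must be kept unmasked as well.
 */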
static bool
gen6_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && engine->id == RCS)
			I915_WRITE_IMR(engine,
				       ~(engine->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && engine->id == RCS)
			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(engine, ~0);
		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		I915_WRITE_IMR(engine, ~0);
		gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

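/* Unlike gen6, there is no GT-level enable call here; on gen8 the
 * master enables appear to be programmed at interrupt install time, so
 * only the per-engine mask is managed.
 */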
static bool
gen8_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && engine->id == RCS) {
			I915_WRITE_IMR(engine,
				       ~(engine->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(engine->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && engine->id == RCS) {
			I915_WRITE_IMR(engine,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(engine, ~0);
		}
		POSTING_READ(RING_IMR(engine->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 length,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(engine, offset);
	intel_ring_advance(engine);

	return 0;
}

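/* The 830/845 CS has a TLB invalidation bug: unpinned batches are
 * first blitted into a pinned scratch bo (sized below) and executed
 * from there, so the CS never runs from memory with stale PTEs.
 */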
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cs_offset = engine->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(engine, cs_offset);
	intel_ring_emit(engine, 0xdeadbeef);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(req, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(engine,
				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(engine, cs_offset);
		intel_ring_emit(engine, 4096);
		intel_ring_emit(engine, offset);

		intel_ring_emit(engine, MI_FLUSH);
		intel_ring_emit(engine, MI_NOOP);
		intel_ring_advance(engine);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					  0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(engine);

	return 0;
}

static int
i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					  0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(engine);

	return 0;
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = to_i915(engine->dev);

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(engine->dev, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;

	obj = engine->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	engine->status_page.obj = NULL;
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.obj;

	if (obj == NULL) {
		unsigned flags;
		int ret;

		obj = i915_gem_object_create(engine->dev, 4096);
		if (IS_ERR(obj)) {
			DRM_ERROR("Failed to allocate status page\n");
			return PTR_ERR(obj);
		}

		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
		if (ret)
			goto err_unref;

		flags = 0;
		if (!HAS_LLC(engine->dev))
			/* On g33, we cannot place HWS above 256MiB, so
			 * restrict its pinning to the low mappable arena.
			 * Though this restriction is not documented for
			 * gen4, gen5, or byt, they also behave similarly
			 * and hang if the HWS is placed at the top of the
			 * GTT. To generalise, it appears that all !llc
			 * platforms have issues with us placing the HWS
			 * above the mappable region (even though we never
			 * actually map it).
			 */
			flags |= PIN_MAPPABLE;
		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
		if (ret) {
err_unref:
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		engine->status_page.obj = obj;
	}

	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			engine->name, engine->status_page.gfx_addr);

	return 0;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

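/* Unmap and unpin the ringbuffer; this must mirror
 * intel_pin_and_map_ringbuffer_obj() below, which maps through the CPU
 * on LLC platforms and through the GTT aperture otherwise.
 */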
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	GEM_BUG_ON(ringbuf->vma == NULL);
	GEM_BUG_ON(ringbuf->virtual_start == NULL);

	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
		i915_gem_object_unpin_map(ringbuf->obj);
	else
		i915_vma_unpin_iomap(ringbuf->vma);
	ringbuf->virtual_start = NULL;

	i915_gem_object_ggtt_unpin(ringbuf->obj);
	ringbuf->vma = NULL;
}

int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = ringbuf->obj;
	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	unsigned flags = PIN_OFFSET_BIAS | 4096;
	void *addr;
	int ret;

	if (HAS_LLC(dev_priv) && !obj->stolen) {
		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
		if (ret)
			return ret;

		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		addr = i915_gem_object_pin_map(obj);
		if (IS_ERR(addr)) {
			ret = PTR_ERR(addr);
			goto err_unpin;
		}
	} else {
		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
					    flags | PIN_MAPPABLE);
		if (ret)
			return ret;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto err_unpin;

		/* Access through the GTT requires the device to be awake. */
		assert_rpm_wakelock_held(dev_priv);

		addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
		if (IS_ERR(addr)) {
			ret = PTR_ERR(addr);
			goto err_unpin;
		}
	}

	ringbuf->virtual_start = addr;
	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(obj);
	return ret;
}

static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
}

static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
				      struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *obj;

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
	if (obj == NULL)
		obj = i915_gem_object_create(dev, ringbuf->size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	ringbuf->obj = obj;

	return 0;
}

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
{
	struct intel_ringbuffer *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (ring == NULL) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 engine->name);
		return ERR_PTR(-ENOMEM);
	}

	ring->engine = engine;
	list_add(&ring->link, &engine->buffers);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->dev) || IS_845G(engine->dev))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
				 engine->name, ret);
		list_del(&ring->link);
		kfree(ring);
		return ERR_PTR(ret);
	}

	return ring;
}

void
intel_ringbuffer_free(struct intel_ringbuffer *ring)
{
	intel_destroy_ringbuffer_obj(ring);
	list_del(&ring->link);
	kfree(ring);
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(engine->buffer);

	engine->dev = dev;
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
	INIT_LIST_HEAD(&engine->execlist_queue);
	INIT_LIST_HEAD(&engine->buffers);
	i915_gem_batch_pool_init(dev, &engine->batch_pool);
	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	init_waitqueue_head(&engine->irq_queue);

	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf)) {
		ret = PTR_ERR(ringbuf);
		goto error;
	}
	engine->buffer = ringbuf;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(engine);
		if (ret)
			goto error;
	} else {
		WARN_ON(engine->id != RCS);
		ret = init_phys_status_page(engine);
		if (ret)
			goto error;
	}

	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
				engine->name, ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ret = i915_cmd_parser_init_ring(engine);
	if (ret)
		goto error;

	return 0;

error:
	intel_cleanup_engine(engine);
	return ret;
}

void intel_cleanup_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	dev_priv = to_i915(engine->dev);

	if (engine->buffer) {
		intel_stop_engine(engine);
		WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);

		intel_unpin_ringbuffer_obj(engine->buffer);
		intel_ringbuffer_free(engine->buffer);
		engine->buffer = NULL;
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	if (I915_NEED_GFX_HWS(engine->dev)) {
		cleanup_status_page(engine);
	} else {
		WARN_ON(engine->id != RCS);
		cleanup_phys_status_page(engine);
	}

	i915_cmd_parser_fini_ring(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);
	engine->dev = NULL;
}

int intel_engine_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req;

	/* Wait upon the last request to be completed */
	if (list_empty(&engine->request_list))
		return 0;

	req = list_entry(engine->request_list.prev,
			 struct drm_i915_gem_request,
			 list);

	/* Make sure we do not trigger any retires */
	return __i915_wait_request(req,
				   req->i915->mm.interruptible,
				   NULL, NULL);
}

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	request->ringbuf = request->engine->buffer;

	ret = intel_ring_begin(request, 0);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *target;

	intel_ring_update_space(ringbuf);
	if (ringbuf->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &engine->request_list, list) {
		unsigned space;

		/*
		 * The request queue is per-engine, so can contain requests
		 * from multiple ringbuffers. Here, we must ignore any that
		 * aren't from the ringbuffer we're considering.
		 */
		if (target->ringbuf != ringbuf)
			continue;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ringbuf->tail,
					   ringbuf->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->list == &engine->request_list))
		return -ENOSPC;

	return i915_wait_request(target);
}

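/* Reserve space in the ring for @num_dwords dwords, waiting for old
 * requests to retire and wrapping the tail past the end of the buffer
 * as required. On success the caller must emit exactly that many
 * dwords. The canonical pattern, used throughout this file:
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(engine, MI_FLUSH);
 *	intel_ring_emit(engine, MI_NOOP);
 *	intel_ring_advance(engine);
 */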
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int remain_actual = ringbuf->size - ringbuf->tail;
	int remain_usable = ringbuf->effective_size - ringbuf->tail;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;

	total_bytes = bytes + req->reserved_space;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
		wait_bytes = remain_actual + req->reserved_space;
	} else {
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

	if (wait_bytes > ringbuf->space) {
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ret;

		intel_ring_update_space(ringbuf);
	}

	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ringbuf->space);
		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);

		/* Fill the tail with MI_NOOP */
		memset(ringbuf->virtual_start + ringbuf->tail,
		       0, remain_actual);
		ringbuf->tail = 0;
		ringbuf->space -= remain_actual;
	}

	ringbuf->space -= bytes;
	GEM_BUG_ON(ringbuf->space < 0);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(req, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(engine, MI_NOOP);

	intel_ring_advance(engine);

	return 0;
}

void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(engine->dev);

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore_obj) {
		struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
		struct page *page = i915_gem_object_get_dirty_page(obj, 0);
		void *semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}
	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	engine->set_seqno(engine, seqno);
	engine->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
				     u32 value)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(engine, value);
	POSTING_READ(RING_TAIL(engine->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
			       u32 invalidate, u32 flush)
{
	struct intel_engine_cs *engine = req->engine;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(engine->dev)->gen >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine,
			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(engine->dev)->gen >= 8) {
		intel_ring_emit(engine, 0); /* upper addr */
		intel_ring_emit(engine, 0); /* value */
	} else  {
		intel_ring_emit(engine, 0);
		intel_ring_emit(engine, MI_NOOP);
	}
	intel_ring_advance(engine);
	return 0;
}

static int
gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	bool ppgtt = USES_PPGTT(engine->dev) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(engine, lower_32_bits(offset));
	intel_ring_emit(engine, upper_32_bits(offset));
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			     u64 offset, u32 len,
			     unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(engine, offset);
	intel_ring_advance(engine);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(engine, offset);
	intel_ring_advance(engine);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct drm_i915_gem_request *req,
			   u32 invalidate, u32 flush)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_device *dev = engine->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(dev)->gen >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine,
			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(dev)->gen >= 8) {
		intel_ring_emit(engine, 0); /* upper addr */
		intel_ring_emit(engine, 0); /* value */
	} else  {
		intel_ring_emit(engine, 0);
		intel_ring_emit(engine, MI_NOOP);
	}
	intel_ring_advance(engine);

	return 0;
}

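/* Wire up the render (RCS) engine: select the gen-specific vfuncs for
 * flushing, interrupts, seqno handling and batch dispatch, then create
 * the ring and, on gen5+, the pipe-control scratch page.
 */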
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
	struct drm_i915_gem_object *obj;
	int ret;

	engine->name = "render ring";
	engine->id = RCS;
	engine->exec_id = I915_EXEC_RENDER;
	engine->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 8) {
		if (i915_semaphore_is_enabled(dev)) {
			obj = i915_gem_object_create(dev, 4096);
			if (IS_ERR(obj)) {
				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
				i915.semaphores = 0;
			} else {
				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
				if (ret != 0) {
					drm_gem_object_unreference(&obj->base);
					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
					i915.semaphores = 0;
				} else
					dev_priv->semaphore_obj = obj;
			}
		}

		engine->init_context = intel_rcs_ctx_init;
		engine->add_request = gen8_render_add_request;
		engine->flush = gen8_render_ring_flush;
		engine->irq_get = gen8_ring_get_irq;
		engine->irq_put = gen8_ring_put_irq;
		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		engine->get_seqno = ring_get_seqno;
		engine->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			WARN_ON(!dev_priv->semaphore_obj);
			engine->semaphore.sync_to = gen8_ring_sync;
			engine->semaphore.signal = gen8_rcs_signal;
			GEN8_RING_SEMAPHORE_INIT(engine);
		}
	} else if (INTEL_INFO(dev)->gen >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->add_request = gen6_add_request;
		engine->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			engine->flush = gen6_render_ring_flush;
		engine->irq_get = gen6_ring_get_irq;
		engine->irq_put = gen6_ring_put_irq;
		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
		engine->get_seqno = ring_get_seqno;
		engine->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			engine->semaphore.sync_to = gen6_ring_sync;
			engine->semaphore.signal = gen6_signal;
			/*
			 * The current semaphore is only applied on pre-gen8
			 * platform.  And there is no VCS2 ring on the pre-gen8
			 * platform. So the semaphore between RCS and VCS2 is
			 * initialized as INVALID.  Gen8 will initialize the
			 * sema between VCS2 and RCS later.
			 */
			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
			engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
			engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
			engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	} else if (IS_GEN5(dev)) {
		engine->add_request = pc_render_add_request;
		engine->flush = gen4_render_ring_flush;
		engine->get_seqno = pc_render_get_seqno;
		engine->set_seqno = pc_render_set_seqno;
		engine->irq_get = gen5_ring_get_irq;
		engine->irq_put = gen5_ring_put_irq;
		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		engine->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			engine->flush = gen2_render_ring_flush;
		else
			engine->flush = gen4_render_ring_flush;
		engine->get_seqno = ring_get_seqno;
		engine->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			engine->irq_get = i8xx_ring_get_irq;
			engine->irq_put = i8xx_ring_put_irq;
		} else {
			engine->irq_get = i9xx_ring_get_irq;
			engine->irq_put = i9xx_ring_put_irq;
		}
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}
	engine->write_tail = ring_write_tail;

	if (IS_HASWELL(dev))
		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		obj = i915_gem_object_create(dev, I830_WA_SIZE);
		if (IS_ERR(obj)) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return PTR_ERR(obj);
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		engine->scratch.obj = obj;
		engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	ret = intel_init_ring_buffer(dev, engine);
	if (ret)
		return ret;

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = intel_init_pipe_control(engine);
		if (ret)
			return ret;
	}

	return 0;
}

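/* Wire up the first video decode (VCS) engine; gen6 needs the special
 * tail-write workaround above, while older parts use the legacy BSD
 * ring.
 */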
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine = &dev_priv->engine[VCS];

	engine->name = "bsd ring";
	engine->id = VCS;
	engine->exec_id = I915_EXEC_BSD;

	engine->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		engine->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			engine->write_tail = gen6_bsd_ring_write_tail;
		engine->flush = gen6_bsd_ring_flush;
		engine->add_request = gen6_add_request;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
		engine->get_seqno = ring_get_seqno;
		engine->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			engine->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			engine->irq_get = gen8_ring_get_irq;
			engine->irq_put = gen8_ring_put_irq;
			engine->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				engine->semaphore.sync_to = gen8_ring_sync;
				engine->semaphore.signal = gen8_xcs_signal;
				GEN8_RING_SEMAPHORE_INIT(engine);
			}
		} else {
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			engine->irq_get = gen6_ring_get_irq;
			engine->irq_put = gen6_ring_put_irq;
			engine->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				engine->semaphore.sync_to = gen6_ring_sync;
				engine->semaphore.signal = gen6_signal;
				engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
				engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
				engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
				engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
				engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
				engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
				engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
				engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
				engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
				engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
			}
		}
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->flush = bsd_ring_flush;
		engine->add_request = i9xx_add_request;
		engine->get_seqno = ring_get_seqno;
		engine->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			engine->irq_get = gen5_ring_get_irq;
			engine->irq_put = gen5_ring_put_irq;
		} else {
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			engine->irq_get = i9xx_ring_get_irq;
			engine->irq_put = i9xx_ring_put_irq;
		}
		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	engine->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, engine);
}

/**
 * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];

	engine->name = "bsd2 ring";
	engine->id = VCS2;
	engine->exec_id = I915_EXEC_BSD;

	engine->write_tail = ring_write_tail;
	engine->mmio_base = GEN8_BSD2_RING_BASE;
	engine->flush = gen6_bsd_ring_flush;
	engine->add_request = gen6_add_request;
	engine->irq_seqno_barrier = gen6_seqno_barrier;
	engine->get_seqno = ring_get_seqno;
	engine->set_seqno = ring_set_seqno;
	engine->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	engine->irq_get = gen8_ring_get_irq;
	engine->irq_put = gen8_ring_put_irq;
	engine->dispatch_execbuffer =
			gen8_ring_dispatch_execbuffer;
	if (i915_semaphore_is_enabled(dev)) {
		engine->semaphore.sync_to = gen8_ring_sync;
		engine->semaphore.signal = gen8_xcs_signal;
		GEN8_RING_SEMAPHORE_INIT(engine);
	}
	engine->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, engine);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine = &dev_priv->engine[BCS];

	engine->name = "blitter ring";
	engine->id = BCS;
	engine->exec_id = I915_EXEC_BLT;

	engine->mmio_base = BLT_RING_BASE;
	engine->write_tail = ring_write_tail;
	engine->flush = gen6_ring_flush;
	engine->add_request = gen6_add_request;
	engine->irq_seqno_barrier = gen6_seqno_barrier;
	engine->get_seqno = ring_get_seqno;
	engine->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		engine->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		engine->irq_get = gen8_ring_get_irq;
		engine->irq_put = gen8_ring_put_irq;
		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			engine->semaphore.sync_to = gen8_ring_sync;
			engine->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT(engine);
		}
	} else {
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		engine->irq_get = gen6_ring_get_irq;
		engine->irq_put = gen6_ring_put_irq;
		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			engine->semaphore.signal = gen6_signal;
			engine->semaphore.sync_to = gen6_ring_sync;
			/*
			 * The current semaphore is only applied on pre-gen8
			 * platform.  And there is no VCS2 ring on the pre-gen8
			 * platform. So the semaphore between BCS and VCS2 is
			 * initialized as INVALID.  Gen8 will initialize the
			 * sema between BCS and VCS2 later.
			 */
			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
			engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
			engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
			engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	engine->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, engine);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine = &dev_priv->engine[VECS];

	engine->name = "video enhancement ring";
	engine->id = VECS;
	engine->exec_id = I915_EXEC_VEBOX;

	engine->mmio_base = VEBOX_RING_BASE;
	engine->write_tail = ring_write_tail;
	engine->flush = gen6_ring_flush;
	engine->add_request = gen6_add_request;
	engine->irq_seqno_barrier = gen6_seqno_barrier;
	engine->get_seqno = ring_get_seqno;
	engine->set_seqno = ring_set_seqno;

	if (INTEL_INFO(dev)->gen >= 8) {
		engine->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		engine->irq_get = gen8_ring_get_irq;
		engine->irq_put = gen8_ring_put_irq;
		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			engine->semaphore.sync_to = gen8_ring_sync;
			engine->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT(engine);
		}
	} else {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_get = hsw_vebox_get_irq;
		engine->irq_put = hsw_vebox_put_irq;
		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			engine->semaphore.sync_to = gen6_ring_sync;
			engine->semaphore.signal = gen6_signal;
			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
			engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
			engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
			engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	engine->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, engine);
}

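/* gpu_caches_dirty tracks whether a flush is outstanding; the two
 * helpers below emit the flush only when needed and leave a trace
 * event behind for debugging.
 */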
int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (!engine->gpu_caches_dirty)
		return 0;

	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);

	engine->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (engine->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);

	engine->gpu_caches_dirty = false;
	return 0;
}

void
intel_stop_engine(struct intel_engine_cs *engine)
{
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	stop_ring(engine);
}