/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
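/*
 * For reference, assuming 4KiB pages: 66944 / 4096 = 16.34..., which is why
 * HSW_CXT_TOTAL_SIZE above rounds up to 17 pages.
 */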

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
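
/*
 * __engine_mmio_base() below returns the first entry of the (reverse gen
 * sorted) table whose gen is not newer than the device. For example, with the
 * VCS table in intel_engines[] ({ 11, GEN11_BSD_RING_BASE },
 * { 6, GEN6_BSD_RING_BASE }, { 4, BSD_RING_BASE }), a gen9 device skips the
 * gen11 entry and uses GEN6_BSD_RING_BASE.
 */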

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
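
/*
 * __sprint_engine_name() composes the class name and instance number, e.g.
 * the VCS2 entry (class "vcs", instance 1) becomes "vcs1".
 */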

static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t hwstam;

	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
		return;

	hwstam = RING_HWSTAM(engine->mmio_base);
	if (INTEL_GEN(dev_priv) >= 3)
		I915_WRITE(hwstam, mask);
	else
		I915_WRITE16(hwstam, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(dev_priv)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_load_failure())
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_DEBUG_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
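
/*
 * In intel_engine_init_execlist() below, port_mask is the mask over the
 * number of ELSP ports (ports = mask + 1), so port_mask = 1 selects the
 * default two ports.
 */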

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	__i915_gem_object_release_unless_active(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_vma *vma)
{
	unsigned int flags;

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	return i915_vma_pin(vma, 0, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
		ret = pin_ggtt_status_page(engine, vma);
		if (ret)
			goto err_unpin;
	}

	return 0;

err_unpin:
	i915_gem_object_unpin_map(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
int intel_engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	err = init_status_page(engine);
	if (err)
		return err;

	err = i915_timeline_init(engine->i915,
				 &engine->timeline,
				 engine->name,
				 engine->status_page.vma);
	if (err)
		goto err_hwsp;

	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);

	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);

	return 0;

err_hwsp:
	cleanup_status_page(engine);
	return err;
}

static void __intel_context_unpin(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	intel_context_unpin(to_intel_context(ctx, engine));
}

struct measure_breadcrumb {
	struct i915_request rq;
	struct i915_timeline timeline;
	struct intel_ring ring;
	u32 cs[1024];
};
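
/*
 * measure_breadcrumb_dw() emits the engine's breadcrumb into a kzalloc'ed
 * scratch frame (ring, timeline and request) purely to count how many dwords
 * it writes; intel_engine_init_common() caches the result in
 * engine->emit_breadcrumb_dw.
 */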

static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
{
	struct measure_breadcrumb *frame;
	int dw = -ENOMEM;

	GEM_BUG_ON(!engine->i915->gt.scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	if (i915_timeline_init(engine->i915,
			       &frame->timeline, "measure",
			       engine->status_page.vma))
		goto out_frame;

	INIT_LIST_HEAD(&frame->ring.request_list);
	frame->ring.timeline = &frame->timeline;
	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);

	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.ring = &frame->ring;
	frame->rq.timeline = &frame->timeline;

	dw = engine->emit_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	i915_timeline_fini(&frame->timeline);

out_frame:
	kfree(frame);
	return dw;
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = intel_context_pin(i915->kernel_context, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (i915->preempt_context) {
		ce = intel_context_pin(i915->preempt_context, engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	ret = measure_breadcrumb_dw(engine);
	if (ret < 0)
		goto err_breadcrumbs;

	engine->emit_breadcrumb_dw = ret;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);

err_unpin_kernel:
	__intel_context_unpin(i915->kernel_context, engine);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
	__intel_context_unpin(i915->kernel_context, engine);

	i915_timeline_fini(&engine->timeline);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(dev_priv) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(dev_priv,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, 0,
					 NULL)) {
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	POSTING_READ_FW(mode);

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
		      _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 mcr_s_ss_select;
	u32 slice = fls(sseu->slice_mask);
	u32 subslice = fls(sseu->subslice_mask[slice]);

	if (IS_GEN(dev_priv, 10))
		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
				  GEN8_MCR_SUBSLICE(subslice);
	else if (INTEL_GEN(dev_priv) >= 11)
		mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
				  GEN11_MCR_SUBSLICE(subslice);
	else
		mcr_s_ss_select = 0;

	return mcr_s_ss_select;
}
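
/*
 * read_subslice_reg() steers a per-slice/subslice register read: under
 * forcewake and the uncore lock it points GEN8_MCR_SELECTOR at the requested
 * slice/subslice, reads the register, then restores the default steering.
 */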

static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	u32 mcr_slice_subslice_mask;
	u32 mcr_slice_subslice_select;
	u32 default_mcr_s_ss_select;
	u32 mcr;
	u32 ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);

	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
		     default_mcr_s_ss_select);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= default_mcr_s_ss_select;

	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	intel_wakeref_t wakeref;
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	/* If the whole device is asleep, the engine must be idle */
	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (!wakeref)
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv, wakeref);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
		return false;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active)) {
		struct tasklet_struct *t = &engine->execlists.tasklet;

		local_bh_disable();
		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}
		local_bh_enable();

		/* Otherwise flush the tasklet if it was on another cpu */
		tasklet_unlock_wait(t);

		if (READ_ONCE(engine->execlists.active))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	/* Ring stopped? */
	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine, or the last
 * context executed if the engine is already idle, is the kernel context
 * (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct intel_context *kernel_context =
		to_intel_context(engine->i915->kernel_context, engine);
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline.last_request);
	if (rq)
		return rq->hw_context == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

static bool reset_engines(struct drm_i915_private *i915)
{
	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		return false;

	return intel_gpu_reset(i915, ALL_ENGINES) == 0;
}

/**
 * intel_engines_sanitize: called after the GPU has lost power
 * @i915: the i915 device
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_engines_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	if (!reset_engines(i915) && !force)
		return;

	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, false);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);

		if (engine->park)
			engine->park(engine);

		if (engine->pinned_default_state) {
			i915_gem_object_unpin_map(engine->default_state);
			engine->pinned_default_state = NULL;
		}

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		void *map;

		/* Pin the default state for fast resets from atomic context. */
		map = NULL;
		if (engine->default_state)
			map = i915_gem_object_pin_map(engine->default_state,
						      I915_MAP_WB);
		if (!IS_ERR_OR_NULL(map))
			engine->pinned_default_state = map;

		if (engine->unpark)
			engine->unpark(engine);

		intel_engine_init_hangcheck(engine);
	}
}

/**
 * intel_engine_lost_context: called when the GPU is reset into unknown state
 * @engine: the engine
 *
 * We have either reset the GPU or are otherwise about to lose state tracking of
 * the current GPU logical state (e.g. suspend). On next use, it is therefore
 * imperative that we make no presumptions about the current state and load
 * from scratch.
 */
void intel_engine_lost_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n",
		   prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
			   read, write,
			   GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
				   idx,
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2 + 1],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
					 idx, count,
					 i915_ggtt_offset(rq->ring->vma));
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	unsigned long flags;
	struct rb_node *rb;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline.requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tfirst  ");

	rq = list_last_entry(&engine->timeline.requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tlast   ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start:  0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head:   0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail:   0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit:   0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space:  0x%08x\n",
			   rq->ring->space);

		print_request_ring(m, rq);
	}

	rcu_read_unlock();

	wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, print_request, 8);

	spin_lock_irqsave(&b->rb_lock, flags);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid,
			   task_state_to_char(w->tsk),
			   w->seqno);
	}
	spin_unlock_irqrestore(&b->rb_lock, flags);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};
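
/*
 * intel_engine_lookup_user() below translates a uabi (class, instance) pair
 * into the engine registered by intel_engine_setup(); e.g.
 * (I915_ENGINE_CLASS_VIDEO, 1) maps to VIDEO_DECODE_CLASS instance 1,
 * i.e. the VCS2 engine.
 */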

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	spin_lock_irqsave(&engine->timeline.lock, flags);
	write_seqlock(&engine->stats.lock);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock(&engine->stats.lock);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}
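
/*
 * A sampling user (e.g. a PMU-style counter) would typically take two
 * snapshots and subtract, along these lines (illustrative only):
 *
 *	ktime_t t0 = intel_engine_get_busy_time(engine);
 *	...
 *	busy_ns = ktime_to_ns(ktime_sub(intel_engine_get_busy_time(engine), t0));
 */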

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif