/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

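/*
 * Pick the first mmio base whose minimum gen does not exceed the running
 * device; the per-engine tables above are sorted newest-first. As an
 * illustration, looking up VCS on a gen9 part skips the gen11 entry and
 * returns GEN6_BSD_RING_BASE.
 */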
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

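/*
 * Compose the canonical engine name from the class name and instance, e.g.
 * "rcs0" or "vcs1", warning if it would overflow INTEL_ENGINE_CS_MAX_NAME.
 */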
static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	i915_timeline_init(engine->i915, &engine->timeline, engine->name);

	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static void __intel_context_unpin(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	intel_context_unpin(to_intel_context(ctx, engine));
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = intel_context_pin(i915->kernel_context, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (i915->preempt_context) {
		ce = intel_context_pin(i915->preempt_context, engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);

err_unpin_kernel:
	__intel_context_unpin(i915->kernel_context, engine);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
	__intel_context_unpin(i915->kernel_context, engine);

	i915_timeline_fini(&engine->timeline);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(dev_priv) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(dev_priv,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, 0,
					 NULL)) {
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	POSTING_READ_FW(mode);

	return err;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

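/*
 * Read a register from a specific slice/subslice: steer the MCR selector at
 * the requested slice/subslice (gen8 vs gen11 field layout), perform the
 * read under forcewake and the uncore lock, then clear the selector again as
 * the hardware expects.
 */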
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr_slice_subslice_mask;
	uint32_t mcr_slice_subslice_select;
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

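/*
 * Inspect the ring registers directly: the ring is idle once HEAD has caught
 * up with TAIL and, on gen3+, the CS parser reports MODE_IDLE. If the device
 * is already asleep we report idle without waking it up.
 */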
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active)) {
		struct intel_engine_execlists *execlists = &engine->execlists;

		if (tasklet_trylock(&execlists->tasklet)) {
			execlists->tasklet.func(execlists->tasklet.data);
			tasklet_unlock(&execlists->tasklet);
		}

		if (READ_ONCE(execlists->active))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context submitted to this engine (or, if the
 * engine is already idle, the last context it executed) was the kernel
 * context (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct intel_context *kernel_context =
		to_intel_context(engine->i915->kernel_context, engine);
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline.last_request);
	if (rq)
		return rq->hw_context == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);

		if (engine->park)
			engine->park(engine);

		if (engine->pinned_default_state) {
			i915_gem_object_unpin_map(engine->default_state);
			engine->pinned_default_state = NULL;
		}

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and is now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		void *map;

		/* Pin the default state for fast resets from atomic context. */
		map = NULL;
		if (engine->default_state)
			map = i915_gem_object_pin_map(engine->default_state,
						      I915_MAP_WB);
		if (!IS_ERR_OR_NULL(map))
			engine->pinned_default_state = map;

		if (engine->unpark)
			engine->unpark(engine);

		intel_engine_init_hangcheck(engine);
	}
}

/**
 * intel_engine_lost_context: called when the GPU is reset into unknown state
 * @engine: the engine
 *
 * We have either reset the GPU or are otherwise about to lose state tracking of
 * the current GPU logical state (e.g. suspend). On next use, it is therefore
 * imperative that we make no presumptions about the current state and load
 * from scratch.
 */
void intel_engine_lost_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	engine->legacy_active_context = NULL;
	engine->legacy_active_ppgtt = NULL;

	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
		   prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

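/*
 * Print the buffer in rows of eight u32s; runs of identical rows are
 * collapsed into a single "*" line, in the style of hexdump(1).
 */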
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "%08zx %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
	}

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
					 idx, count,
					 i915_ggtt_offset(rq->ring->vma));
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}

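/*
 * Example usage (illustrative sketch only), as in the parking path above when
 * an engine refuses to idle:
 *
 *	struct drm_printer p = drm_debug_printer(__func__);
 *
 *	intel_engine_dump(engine, &p, "%s\n", engine->name);
 */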
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	const int MAX_REQUESTS_TO_SHOW = 8;
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq, *last;
	unsigned long flags;
	struct rb_node *rb;
	int count;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline.requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tfirst  ");

	rq = list_last_entry(&engine->timeline.requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tlast   ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		drm_printf(m, "\t\tring->start:  0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head:   0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail:   0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit:   0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space:  0x%08x\n",
			   rq->ring->space);
	}

	rcu_read_unlock();

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	local_irq_save(flags);
	spin_lock(&engine->timeline.lock);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
			print_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, sched.link) {
			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
				print_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tQ ");
	}

	spin_unlock(&engine->timeline.lock);

	spin_lock(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock(&b->rb_lock);
	local_irq_restore(flags);

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};

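/*
 * Translate a userspace engine class/instance pair (I915_ENGINE_CLASS_* plus
 * an instance number) into the driver's internal engine, or NULL if the pair
 * is out of range or the engine is not present on this device.
 */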
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

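/*
 * Example usage (illustrative sketch only, assuming a sleepable context):
 * sample engine busyness over a short interval with the helpers below:
 *
 *	ktime_t t0, busy;
 *
 *	if (intel_enable_engine_stats(engine))
 *		return;
 *	t0 = intel_engine_get_busy_time(engine);
 *	msleep(100);
 *	busy = ktime_sub(intel_engine_get_busy_time(engine), t0);
 *	intel_disable_engine_stats(engine);
 */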
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	tasklet_disable(&execlists->tasklet);
	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
	tasklet_enable(&execlists->tasklet);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif