/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

static const struct engine_info {
	const char *name;
	unsigned exec_id;
	enum intel_engine_hw_id hw_id;
	u32 mmio_base;
	unsigned irq_shift;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
	[RCS] = {
		.name = "render ring",
		.exec_id = I915_EXEC_RENDER,
		.hw_id = RCS_HW,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[BCS] = {
		.name = "blitter ring",
		.exec_id = I915_EXEC_BLT,
		.hw_id = BCS_HW,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VCS] = {
		.name = "bsd ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS_HW,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VCS2] = {
		.name = "bsd2 ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS2_HW,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd2_ring_buffer,
	},
	[VECS] = {
		.name = "video enhancement ring",
		.exec_id = I915_EXEC_VEBOX,
		.hw_id = VECS_HW,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};

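/*
 * Allocate an intel_engine_cs for @id and fill it in from the static
 * intel_engines[] description above; no hardware access happens here.
 */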
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	engine->name = info->name;
	engine->exec_id = info->exec_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_early() - allocate the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	unsigned int mask = 0;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;
	int err;

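	/* The ring mask must be non-empty and name only known engines */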
	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - allocate, populate and init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	unsigned int mask = 0;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		int (*init)(struct intel_engine_cs *engine);

		if (i915.enable_execlists)
			init = intel_engines[id].init_execlists;
		else
			init = intel_engines[id].init_legacy;
		if (!init) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
			continue;
		}

		err = init(engine);
		if (err) {
			err_id = id;
			goto cleanup;
		}

		mask |= ENGINE_MASK(id);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
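	/*
	 * Engines at or beyond the point of failure have only been
	 * allocated, not initialised, so a plain kfree() suffices; earlier
	 * engines need the full submission-mode specific teardown.
	 */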
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id)
			kfree(engine);
		else if (i915.enable_execlists)
			intel_logical_ring_cleanup(engine);
		else
			intel_engine_cleanup(engine);
	}
	return err;
}

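/**
 * intel_engine_init_global_seqno - reset the engine's seqno bookkeeping
 * @engine: Engine to update.
 * @seqno: Seqno value to report as the last completed.
 *
 * Brings the hardware status page, semaphore tracking and hangcheck state
 * back in line with @seqno (e.g. across a GPU reset), then wakes any
 * waiters so they resample the new value.
 */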
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno, provided we
	 * also reset the tracking semaphore value to 0, it will always be
	 * before the next request's seqno. If we don't reset the semaphore
	 * value, then when the seqno moves backwards all future waits will
	 * complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
	engine->timeline->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);
}

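/* Point the engine at its slot in the device-wide global timeline */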
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
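	/* Start with an empty execlist queue; it is filled on submission */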
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

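/*
 * Allocate and pin a scratch page for the engine, preferring stolen
 * memory with a fallback to an internal object, mapped high in the
 * global GTT (used, for example, as the pipe control scratch address).
 */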
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	int ret;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = engine->context_pin(engine, engine->i915->kernel_context);
	if (ret)
		return ret;

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_unpin;

	return 0;

err_unpin:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	engine->context_unpin(engine, engine->i915->kernel_context);
}

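/*
 * Read the engine's active head (ACTHD) pointer; the register width and
 * location vary across generations.
 */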
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

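/* Read the address (BBADDR) of the batch last executed by the engine */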
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

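/* Return a human-readable suffix for a cache level, for debug output */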
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

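/*
 * Steer GEN8_MCR_SELECTOR at the target slice/subslice and read @reg,
 * holding forcewake and the uncore lock; the selector is restored to 0
 * afterwards as the hardware expects.
 */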
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}