/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
	if (IS_GEN6(dev_priv))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

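/* Read back the HW-reported size of the render context image; used to size
 * the default context object on platforms using legacy HW contexts.
 */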
static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!ctx->closed);

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		i915_vma_put(ce->state);
	}

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

static void context_close(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(ctx->closed);
	ctx->closed = true;
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);
	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

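/* Allocate a unique hw_id for the context from the ida; if the ida is
 * exhausted, retire pending requests (which may release stale contexts)
 * and retry once.
 */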
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	ctx->ggtt_alignment = get_context_alignment(dev_priv);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = i915_gem_alloc_context_obj(dev,
						 dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

	return ctx;

err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev->struct_mutex);

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt =
			i915_ppgtt_create(to_i915(dev), file_priv);

		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			idr_remove(&file_priv->context_idr, ctx->user_handle);
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->execlists_force_single_submission = true;
	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		struct intel_context *ce = &ctx->engine[engine->id];

		if (ce->state)
			i915_vma_unpin(ce->state);

		i915_gem_context_put(ctx);
	}
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev->struct_mutex);

	if (i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link)
			intel_lr_context_reset(dev_priv, ctx);
	}

	i915_gem_context_lost(dev_priv);
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

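/* Called when the GPU state is lost (e.g. via i915_gem_context_reset() above):
 * drop each engine's last_context reference and, in legacy mode, mark the
 * default contexts as needing reinitialisation and an L3 remap on their next
 * context switch.
 */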
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev->struct_mutex);

	context_close(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

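/* Emit the MI_SET_CONTEXT sequence for a legacy (ringbuffer) context switch,
 * including the TLB-invalidation and MI_ARB_ON_OFF/PSMI workarounds required
 * around it on the affected platforms.
 */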
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915.semaphores ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(dev_priv)) {
		ret = engine->emit_flush(req, EMIT_INVALIDATE);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_GEN(dev_priv) < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(ring,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(ring, last_reg);
				intel_ring_emit(ring,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(ring,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(ring, last_reg);
			intel_ring_emit(ring,
					i915_ggtt_offset(engine->scratch));
			intel_ring_emit(ring, MI_NOOP);
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

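/* Re-emit the saved L3 remapping registers for @slice via MI_LOAD_REGISTER_IMM,
 * if any remap information has been recorded for it.
 */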
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_ring *ring = req->ring;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

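/* The switch can be skipped entirely only if no L3 remap is pending, the
 * target context is already initialised, its page directories are not dirty
 * for this engine, and it is already the engine's last context.
 */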
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct i915_gem_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

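/* Perform a full legacy context switch on the render ring: pin the target
 * context image, load the page directories if required, emit MI_SET_CONTEXT,
 * keep the previous context alive until this request retires, then apply any
 * pending L3 remaps and one-time context initialisation.
 */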
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_vma *vma = to->engine[RCS].state;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Clear this page out of any CPU caches for coherent swap-in/out. */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
		if (ret)
			return ret;
	}

	/* Trying to pin first makes error handling easier. */
	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto err;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto err;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
		/* state is kept alive until the next request */
		i915_vma_unpin(from->engine[RCS].state);
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;

err:
	i915_vma_unpin(vma);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			if (engine->last_context)
				i915_gem_context_put(engine->last_context);
			engine->last_context = i915_gem_context_get(to);
		}

		return 0;
	}

	return do_rcs_switch(req);
}

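/* Queue a request on every engine that switches it back to the kernel
 * context, dropping the engines' references to whatever user contexts were
 * last active.
 */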
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine->last_context == NULL)
			continue;

		if (engine->last_context == dev_priv->kernel_context)
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

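/* Illustrative userspace usage (a sketch, not part of this file): a client
 * typically creates a context with DRM_IOCTL_I915_GEM_CONTEXT_CREATE and then
 * passes the returned ctx_id in the rsvd1 field of its execbuffer2 calls, e.g.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	execbuf.rsvd1 = create.ctx_id;
 *
 * Error handling and the execbuffer setup are omitted above.
 */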
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size) {
			ret = -EINVAL;
		} else {
			if (args->value)
				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
			else
				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}