/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, so that it can invoke a save of the context we actually
 * care about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */
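
/*
 * Mapping the counters above onto the code (an illustrative sketch, not an
 * exhaustive description):
 *	refcount -> ctx->ref, manipulated via i915_gem_context_get()/_put()
 *	pincount -> ctx->engine[id].pin_count, held while the context is the
 *		    current or next context on an engine
 *	active   -> tracked through the context's VMA on the vm's active list
 */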

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
	if (IS_GEN6(dev_priv))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!ctx->closed);

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		i915_vma_put(ce->state);
	}

	put_pid(ctx->pid);
	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}
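
/*
 * Illustrative sketch: this function is only ever reached as the kref release
 * callback. Callers drop their references through a helper along these lines
 * (assumed to live in i915_drv.h):
 *
 *	static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 *	{
 *		kref_put(&ctx->ref, i915_gem_context_free);
 *	}
 */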

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

static void context_close(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(ctx->closed);
	ctx->closed = true;
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);
	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	ctx->ggtt_alignment = get_context_alignment(dev_priv);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = i915_gem_alloc_context_obj(dev,
						 dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	if (file_priv)
		ctx->pid = get_task_pid(current, PIDTYPE_PID);

	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

	return ctx;

err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as for the idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev->struct_mutex);

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt =
			i915_ppgtt_create(to_i915(dev), file_priv);

		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			idr_remove(&file_priv->context_idr, ctx->user_handle);
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->execlists_force_single_submission = true;
	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		struct intel_context *ce = &ctx->engine[engine->id];

		if (ce->state)
			i915_vma_unpin(ce->state);

		i915_gem_context_put(ctx);
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev->struct_mutex);

	context_close(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915.semaphores ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(dev_priv)) {
		ret = engine->emit_flush(req, EMIT_INVALIDATE);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_GEN(dev_priv) < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(ring,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(ring, last_reg);
				intel_ring_emit(ring,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(ring,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(ring, last_reg);
			intel_ring_emit(ring,
					i915_ggtt_offset(engine->scratch));
			intel_ring_emit(ring, MI_NOOP);
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}
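
/*
 * Illustrative sketch of the dwords emitted above. The pre-gen7 baseline
 * (len == 4) is simply:
 *
 *	MI_NOOP
 *	MI_SET_CONTEXT
 *	i915_ggtt_offset(context state) | MI_MM_SPACE_GTT | save/restore flags
 *	MI_NOOP			(WaMiSetContext_Hang)
 *
 * Gen7+ brackets this in MI_ARB_ON_OFF and, when cross-ring semaphores are
 * enabled, adds MI_LOAD_REGISTER_IMM writes to each other engine's
 * RING_PSMI_CTL plus an MI_STORE_REGISTER_MEM to the scratch page as a delay.
 */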

static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_ring *ring = req->ring;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
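
/*
 * The resulting command stream is one load-register-immediate batch, e.g.
 * with N == GEN7_L3LOG_SIZE/4:
 *
 *	MI_LOAD_REGISTER_IMM(N)
 *	GEN7_L3LOG(slice, 0),   remap_info[0]
 *	...
 *	GEN7_L3LOG(slice, N-1), remap_info[N-1]
 *	MI_NOOP
 */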

static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct i915_gem_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_vma *vma = to->engine[RCS].state;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Clear this page out of any CPU caches for coherent swap-in/out. */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
		if (ret)
			return ret;
	}

	/* Trying to pin first makes error handling easier. */
	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto err;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto err;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
		/* state is kept alive until the next request */
		i915_vma_unpin(from->engine[RCS].state);
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;

err:
	i915_vma_unpin(vma);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			if (engine->last_context)
				i915_gem_context_put(engine->last_context);
			engine->last_context = i915_gem_context_get(to);
		}

		return 0;
	}

	return do_rcs_switch(req);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine->last_context == NULL)
			continue;

		if (engine->last_context == dev_priv->kernel_context)
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}
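
/*
 * Hypothetical userspace sketch for the two ioctls above, assuming libdrm's
 * drmIoctl() and the uAPI definitions from <drm/i915_drm.h> (error handling
 * elided):
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	// ... submit execbufs with execbuf.rsvd1 = create.ctx_id ...
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */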

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size) {
			ret = -EINVAL;
		} else {
			if (args->value)
				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
			else
				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
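
/*
 * Hypothetical userspace sketch, assuming libdrm: the same struct drives both
 * getparam and setparam; here we query the total GTT size for a context:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
 *		printf("GTT size: %llu bytes\n", (unsigned long long)p.value);
 */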

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
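
/*
 * Hypothetical userspace sketch, assuming libdrm: polling the hang statistics
 * exposed by the ioctl above:
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0 &&
 *	    stats.batch_active)
 *		fprintf(stderr, "context %u hung the GPU\n", ctx_id);
 */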