i915_gem_context.c
/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored, to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its context
 * S3->S1: context object was retired
 * S3->S2: the client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
D
Damien Lespiau 已提交
76
 *  GPU. The GPU has loaded its state already and has stored away the gtt
77 78 79 80 81 82 83 84 85 86 87
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */
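
/*
 * For orientation only, not part of the driver: a minimal userspace sketch of
 * the ioctl-level lifecycle described above, assuming libdrm and an already
 * open i915 DRM fd. The helper name is made up, error handling is trimmed and
 * the execbuffer setup is elided; the kernel-side handlers live at the bottom
 * of this file.
 *
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_context_roundtrip(int fd)
 *	{
 *		struct drm_i915_gem_context_create create = { 0 };
 *		struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *		// S0->S1: allocates a kernel-side i915_gem_context
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *		// S1->S2 happens when an execbuf names create.ctx_id in
 *		// drm_i915_gem_execbuffer2.rsvd1.
 *
 *		// Destroy only drops the user handle; the backing object
 *		// stays alive until the GPU stops referencing it (S3/S5).
 *		destroy.ctx_id = create.ctx_id;
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *
 *		return 0;
 *	}
 */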

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
	if (IS_GEN6(dev_priv))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!ctx->closed);

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		i915_vma_put(ce->state);
	}

	put_pid(ctx->pid);
	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

static void context_close(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(ctx->closed);
	ctx->closed = true;
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);
	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	ctx->ggtt_alignment = get_context_alignment(dev_priv);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = i915_gem_alloc_context_obj(dev,
						 dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	if (file_priv)
		ctx->pid = get_task_pid(current, PIDTYPE_PID);

	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

	return ctx;

err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev->struct_mutex);

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt =
			i915_ppgtt_create(to_i915(dev), file_priv);

		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			idr_remove(&file_priv->context_idr, ctx->user_handle);
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->execlists_force_single_submission = true;
	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		struct intel_context *ce = &ctx->engine[engine->id];

		if (ce->state)
			i915_vma_unpin(ce->state);

		i915_gem_context_put(ctx);
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv, id)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev->struct_mutex);

	context_close(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	enum intel_engine_id id;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915.semaphores ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(dev_priv)) {
		ret = engine->emit_flush(req, EMIT_INVALIDATE);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_GEN(dev_priv) < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(ring,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(ring, last_reg);
				intel_ring_emit(ring,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(ring,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(ring, last_reg);
			intel_ring_emit(ring,
					i915_ggtt_offset(engine->scratch));
			intel_ring_emit(ring, MI_NOOP);
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_ring *ring = req->ring;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct i915_gem_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_vma *vma = to->engine[RCS].state;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Clear this page out of any CPU caches for coherent swap-in/out. */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
		if (ret)
			return ret;
	}

	/* Trying to pin first makes error handling easier. */
	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto err;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto err;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
		/* state is kept alive until the next request */
		i915_vma_unpin(from->engine[RCS].state);
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;

err:
	i915_vma_unpin(vma);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			if (engine->last_context)
				i915_gem_context_put(engine->last_context);
			engine->last_context = i915_gem_context_get(to);
		}

		return 0;
	}

	return do_rcs_switch(req);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine->last_context == NULL)
			continue;

		if (engine->last_context == dev_priv->kernel_context)
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size) {
			ret = -EINVAL;
		} else {
			if (args->value)
				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
			else
				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
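
/*
 * Illustrative only, not part of this file: a userspace sketch of the
 * getparam/setparam interface handled above, assuming libdrm and an open
 * i915 DRM fd. The helper name is made up. Note that, per the handler above,
 * shrinking the ban period below its current value requires CAP_SYS_ADMIN,
 * while raising it does not.
 *
 *	#include <errno.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_tune_context(int fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_gem_context_param p = { 0 };
 *
 *		p.ctx_id = ctx_id;
 *		p.param = I915_CONTEXT_PARAM_GTT_SIZE;
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
 *			return -errno;
 *		// p.value now holds the usable GTT size for this context
 *
 *		p.size = 0;
 *		p.param = I915_CONTEXT_PARAM_BAN_PERIOD;
 *		p.value = 10;	// seconds; assumed >= the current period
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
 *			return -errno;
 *
 *		return 0;
 *	}
 */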

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
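
/*
 * Illustrative only, not part of this file: a userspace sketch of the
 * reset-stats query handled above, assuming libdrm and an open i915 DRM fd.
 * The helper name is made up. An unprivileged caller cannot query the default
 * context (DEFAULT_CONTEXT_HANDLE), and reset_count is only filled in for
 * CAP_SYS_ADMIN, as the handler shows.
 *
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_query_hangs(int fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_reset_stats stats = { 0 };
 *
 *		stats.ctx_id = ctx_id;
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
 *			return -1;
 *
 *		// batch_active: hangs while this context's batch was running;
 *		// batch_pending: hangs while its batches were merely queued.
 *		return stats.batch_active + stats.batch_pending;
 *	}
 */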