/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored, to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, so that a save of the context we actually care about can be
 * invoked. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that would
 * limit the driver's ability to swap out and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
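
/*
 * For orientation, a rough sketch of the userspace view (illustrative only,
 * not code from this file): a context is created and destroyed with the
 * DRM_IOCTL_I915_GEM_CONTEXT_CREATE and DRM_IOCTL_I915_GEM_CONTEXT_DESTROY
 * ioctls implemented at the bottom of this file, and the returned ctx_id is
 * carried in the rsvd1 field of struct drm_i915_gem_execbuffer2 on later
 * submissions. Assuming "fd" is an open DRM fd and "execbuf" an otherwise
 * filled-in execbuffer2 struct, a client does roughly:
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *	...
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */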

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

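/*
 * i915_gem_context_reset() - clean up the per-ring last_context tracking
 * after a GPU reset. In legacy (non-execlists) mode the hardware no longer
 * references any context once it has been reset, so unpin the render ring's
 * context object and drop the references taken by the last context switch.
 */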
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* In execlists mode we will unreference the context when the execlist
	 * queue is cleared and the requests destroyed.
	 */
	if (i915.enable_execlists)
		return;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	if (i915.enable_execlists) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->init_context) {
				ret = ring->init_context(ring,
						ring->default_context);
				if (ret) {
					DRM_ERROR("ring init context: %d\n",
							ret);
					return ret;
				}
			}
		}

	} else
		for_each_ring(ring, dev_priv, i) {
			ret = i915_switch_context(ring, ring->default_context);
			if (ret)
				return ret;
		}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}
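
/*
 * mi_set_context() - emit an MI_SET_CONTEXT command on the given ring,
 * pointing the hardware at the new context's backing object in the GGTT.
 * This also emits the surrounding workarounds the switch requires: the TLB
 * invalidating flush on gen6, the MI_ARB_ON_OFF bracketing on gen7+ and the
 * trailing MI_NOOP (WaMiSetContext_Hang).
 */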
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

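/*
 * do_switch() - perform a legacy (ringbuffer) switch to context @to on @ring.
 * On the render ring this pins the new context's backing object into the
 * GGTT, switches the PPGTT if the context has one, emits MI_SET_CONTEXT via
 * mi_set_context() along with any pending L3 remapping, and then moves the
 * previous context's object onto the active list so it stays resident until
 * the hardware has actually completed the switch. Other rings skip the
 * MI_SET_CONTEXT step and only switch the PPGTT and update the last_context
 * bookkeeping.
 */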
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	struct i915_vma *vma;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (to->ppgtt) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
	if (!(vma->bound & GLOBAL_BIND)) {
		ret = i915_vma_bind(vma,
				    to->legacy_hw_ctx.rcs_state->cache_level,
				    GLOBAL_BIND);
		/* This shouldn't ever fail. */
		if (WARN_ONCE(ret, "GGTT context bind failed!"))
			goto unpin_out;
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(i915_gem_request_get_ring(
			from->legacy_hw_ctx.rcs_state->last_read_req) != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(ring, to);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}