/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
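/*
 * For illustration only: a minimal userspace sketch of the S0->S1 creation and
 * the destroy path described above, assuming libdrm's drmIoctl() and the
 * context uapi structures from include/uapi/drm/i915_drm.h. It is not part of
 * this file's code, just a hint at how the transitions are driven from
 * userspace.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	// S0->S1: create.ctx_id now names the new context
 *
 *	// execbufs that reference create.ctx_id drive S1->S2->S3
 *
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	// destroy path: the handle is gone, but the BO may outlive this call
 */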

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

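/* Required GGTT alignment of the legacy context object; gen6 needs 64KiB,
 * gen7+ is fine with a page. */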
static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

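/* Query the hardware-reported render context size, used to size the default
 * legacy context object. */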
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

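/* Final kref release: drop the ppgtt and the legacy state object, unlink the
 * context and free it. */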
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

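/* Allocate the backing object for a context image, preferring L3+LLC caching
 * where the hardware supports it. */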
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

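/* Allocate and initialise an intel_context: the legacy backing object (when HW
 * contexts are in use), a per-file handle (unless this is a default context),
 * and the initial L3 remap and hang-ban state. */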
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

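/* Called after a GPU reset: either tell the execlists code to reset each
 * context, or drop the per-ring last_context references (and the RCS pin)
 * held by the legacy path. */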
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

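/* Tear down the default context on driver unload, undoing the pin and the
 * extra reference taken by the last context switch. */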
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

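/* Bring up context state for a freshly initialised ring: run init_context()
 * in execlists mode, otherwise perform a legacy context switch for the
 * request. */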
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (i915.enable_execlists) {
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

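/* Per-file open: initialise the context idr and create the file's default
 * context. */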
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

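/* Per-file close: release every context still registered in the file's idr. */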
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

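/* Look up a context by user handle; returns ERR_PTR(-ENOENT) if the handle is
 * not known to this file. */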
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

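/* Emit MI_SET_CONTEXT (plus the surrounding workarounds) asking the render
 * ring to save the current context and load the one belonging to @req. */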
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

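/* The switch can be skipped when the ring stays on the same PPGTT-backed
 * context and no L3 remap or page directory reload is pending. */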
static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

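/* A page directory load before MI_SET_CONTEXT is needed whenever the context
 * has a PPGTT and we are either below gen8 or on a non-render ring. */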
static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

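/* On gen8 the render ring loads the page directories after MI_SET_CONTEXT,
 * but only when the restore was inhibited (i.e. a brand new context). */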
static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

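/* The legacy (ring buffer) context switch: pin the target context image, emit
 * the PPGTT page directory load and MI_SET_CONTEXT as required, apply pending
 * L3 remaps, and keep the previous context's image alive on the active list
 * until the switch has actually happened on the GPU. */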
static int do_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(req);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (req->ctx != ring->last_context) {
			i915_gem_context_reference(req->ctx);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = req->ctx;
		}
		return 0;
	}

	return do_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

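/* DRM_IOCTL_I915_GEM_CONTEXT_CREATE: allocate a new context for this file and
 * return its handle in args->ctx_id. */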
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

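/* DRM_IOCTL_I915_GEM_CONTEXT_DESTROY: drop the file's handle; the context (and
 * its backing object) is freed once the GPU stops referencing it. */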
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

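/* Context getparam ioctl: read a per-context parameter (currently the hang ban
 * period and the no-zeromap flag). */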
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

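/* Context setparam ioctl: update a per-context parameter; lowering the ban
 * period below its current value requires CAP_SYS_ADMIN. */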
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}