/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, thereby invoking a save of the context we actually care
 * about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
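
/*
 * Illustrative userspace sketch (not part of this file): the transitions
 * above are driven through the create/destroy ioctls implemented at the
 * bottom of this file, plus execbuf. Assuming a DRM fd opened via libdrm:
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); // S0->S1
 *	// execbufs carrying create.ctx_id in rsvd1 drive S1->S2->S3
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); // destroy path
 */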

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);

	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	if (i915.enable_execlists) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->init_context) {
				ret = ring->init_context(ring,
						ring->default_context);
				if (ret) {
					DRM_ERROR("ring init context: %d\n",
							ret);
					return ret;
				}
			}
		}

	} else
		for_each_ring(ring, dev_priv, i) {
			ret = i915_switch_context(ring, ring->default_context);
			if (ret)
				return ret;
		}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	struct i915_vma *vma;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (to->ppgtt) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
	if (!(vma->bound & GLOBAL_BIND)) {
		ret = i915_vma_bind(vma,
				    to->legacy_hw_ctx.rcs_state->cache_level,
				    GLOBAL_BIND);
		/* This shouldn't ever fail. */
		if (WARN_ONCE(ret, "GGTT context bind failed!"))
			goto unpin_out;
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(i915_gem_request_get_ring(
			from->legacy_hw_ctx.rcs_state->last_read_req) != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(ring, to);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
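
/*
 * Illustrative userspace sketch (not part of this file): the getparam/setparam
 * ioctls above would be exercised roughly as follows, assuming a DRM fd and a
 * context id obtained from the create ioctl:
 *
 *	struct drm_i915_gem_context_param p = { 0 };
 *
 *	p.ctx_id = ctx_id;
 *	p.param = I915_CONTEXT_PARAM_BAN_PERIOD;
 *	p.value = 10; // seconds
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Note that lowering the ban period below its current value requires
 * CAP_SYS_ADMIN, per the check in i915_gem_context_setparam_ioctl() above.
 */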