/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to);

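/* Tear down a PPGTT address space. The aliasing PPGTT (or an already idle
 * address space) can be cleaned up directly; otherwise remaining VMAs are
 * unbound/evicted before the underlying drm_mm is destroyed. */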
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}

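/* kref release callback, called once the last reference to a PPGTT drops */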
static void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	do_ppgtt_cleanup(ppgtt);
	kfree(ppgtt);
}

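/* Required GGTT alignment of the context object, per generation */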
static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

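/* Query the context size (in bytes) the hardware requires on this generation */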
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

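/* kref release callback for a context: drops the backing object and any
 * PPGTT reference, and unlinks the context from the global context list */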
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_hw_context *ctx = container_of(ctx_ref,
						   typeof(*ctx), ref);
	struct i915_hw_ppgtt *ppgtt = NULL;

	/* We refcount even the aliasing PPGTT to keep the code symmetric */
	if (USES_PPGTT(ctx->obj->base.dev))
		ppgtt = ctx_to_ppgtt(ctx);

	/* XXX: Free up the object before tearing down the address space, in
	 * case we're bound in the PPGTT */
	drm_gem_object_unreference(&ctx->obj->base);

	if (ppgtt)
		kref_put(&ppgtt->ref, ppgtt_release);
	list_del(&ctx->link);
	kfree(ctx);
}

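/* Allocate and initialize a full PPGTT address space for a new context */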
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_init_ppgtt(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	return ppgtt;
}

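/* Allocate a context and its backing object; for user-created contexts
 * (file_priv != NULL) the context is also installed in the file's IDR.
 * Caller must hold dev->struct_mutex. */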
static struct i915_hw_context *
__create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	INIT_LIST_HEAD(&ctx->link);
	if (ctx->obj == NULL) {
		kfree(ctx);
		DRM_DEBUG_DRIVER("Context object allocation failed\n");
		return ERR_PTR(-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 7) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret))
			goto err_out;
	}

	list_add_tail(&ctx->link, &dev_priv->context_list);

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
			GFP_KERNEL);
	if (ret < 0)
		goto err_out;

	ctx->file_priv = file_priv;
	ctx->id = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_hw_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv,
			bool create_vm)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->obj,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (create_vm) {
		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		} else
			ctx->vm = &ppgtt->base;

		/* This case is reserved for the global default context and
		 * should only happen once. */
		if (is_global_default_ctx) {
			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
				ret = -EEXIST;
				goto err_unpin;
			}

			dev_priv->mm.aliasing_ppgtt = ppgtt;
		}
	} else if (USES_PPGTT(dev)) {
		/* For platforms which only have aliasing PPGTT, we fake the
		 * address space and refcounting. */
		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
	} else
		ctx->vm = &dev_priv->gtt.base;

	return ctx;

err_unpin:
	if (is_global_default_ctx)
		i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

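/* After a GPU reset, point each ring back at its default context so the
 * hardware does not try to restore the last (hung) context on the next
 * context switch */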
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct i915_hw_context *dctx;
		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
			continue;

		/* Do a fake switch to the default context */
		ring = &dev_priv->ring[i];
		dctx = ring->default_context;
		if (WARN_ON(!dctx))
			continue;

		if (!ring->last_context)
			continue;

		if (ring->last_context == dctx)
			continue;

		if (i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->obj->base.write_domain = 0;
			dctx->obj->active = 0;
		}

		i915_gem_context_unreference(ring->last_context);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

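/* One-time setup of HW context support: size the context objects and create
 * the default context shared by all rings (RCS holds the reference) */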
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return 0;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);

	if (dev_priv->hw_context_size > (1<<20)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
		return -E2BIG;
	}

	dev_priv->ring[RCS].default_context =
		i915_gem_create_context(dev, NULL, USES_PPGTT(dev));

	if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
				 PTR_ERR(dev_priv->ring[RCS].default_context));
		return PTR_ERR(dev_priv->ring[RCS].default_context);
	}

	for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
			continue;

		ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = dev_priv->ring[RCS].default_context;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
	return 0;
}

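/* Unload-time teardown: drop the default context references and reset the
 * GPU as the only reliable way to stop it accessing the context object */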
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	/* When default context is created and switched to, base object refcount
	 * will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	WARN_ON(!dev_priv->ring[RCS].last_context);
	if (dev_priv->ring[RCS].last_context == dctx) {
		/* Fake switch to NULL context */
		WARN_ON(dctx->obj->active);
		i915_gem_object_ggtt_unpin(dctx->obj);
		i915_gem_context_unreference(dctx);
		dev_priv->ring[RCS].last_context = NULL;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];
		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
			continue;

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_object_ggtt_unpin(dctx->obj);
	i915_gem_context_unreference(dctx);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

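/* Enable the aliasing PPGTT (if present) and switch each ring to its
 * default context */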
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int ret, i;

	if (!HAS_HW_CONTEXTS(dev_priv->dev))
		return 0;

	/* This is the only place the aliasing PPGTT gets enabled, which means
	 * it has to happen before we bail on reset */
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->enable(ppgtt);
	}

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	for_each_ring(ring, dev_priv, i) {
		ret = do_switch(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

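/* idr_for_each() callback used on file close to drop per-file context refs */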
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_hw_context *ctx = p;

	/* Ignore the default context because close will handle it */
	if (i915_gem_context_is_default(ctx))
		return 0;

	i915_gem_context_unreference(ctx);
	return 0;
}

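/* Per-file open hook: create this file's private default context, or a
 * "fake" one used only for hang stats when HW contexts are unsupported */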
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_HW_CONTEXTS(dev)) {
		/* Cheat for hang stats */
		file_priv->private_default_ctx =
			kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);

		if (file_priv->private_default_ctx == NULL)
			return -ENOMEM;

		file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
		return 0;
	}

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	file_priv->private_default_ctx =
		i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(file_priv->private_default_ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(file_priv->private_default_ctx);
	}

	return 0;
}

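/* Per-file close hook: release all contexts still registered in the IDR */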
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (!HAS_HW_CONTEXTS(dev)) {
		kfree(file_priv->private_default_ctx);
		return;
	}

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	i915_gem_context_unreference(file_priv->private_default_ctx);
	idr_destroy(&file_priv->context_idr);
}

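/* Look up a context by user-visible id, returning the file's default context
 * when HW contexts are unsupported, or ERR_PTR(-ENOENT) for an unknown id */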
struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_hw_context *ctx;

	if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
		return file_priv->private_default_ctx;

	ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

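/* Emit MI_SET_CONTEXT (with the documented workarounds around it) to make
 * the render ring save the current context and load @new_context */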
static inline int
mi_set_context(struct intel_ring_buffer *ring,
	       struct i915_hw_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

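/* The core of a context switch: pin the target context, switch the PPGTT if
 * full PPGTT is in use, emit MI_SET_CONTEXT on RCS and retire the old
 * context once the switch has been submitted */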
static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *from = ring->last_context;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
	u32 hw_flags = 0;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->obj == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
	}

	if (from == to && from->last_ring == ring && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->obj,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (USES_FULL_PPGTT(ring->dev)) {
		ret = ppgtt->switch_mm(ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret)
		goto unpin_out;

	if (!to->obj->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
	}

	if (!to->is_initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	to->is_initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;
	to->last_ring = ring;

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->obj);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: file associated with the context, may be NULL
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			struct i915_hw_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	BUG_ON(file && to == NULL);

	/* We have the fake context, but don't support switching. */
	if (!HAS_HW_CONTEXTS(ring->dev))
		return 0;

	return do_switch(ring, to);
}

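/* Handler for the DRM_IOCTL_I915_GEM_CONTEXT_CREATE ioctl */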
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!HAS_HW_CONTEXTS(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

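/* Handler for the DRM_IOCTL_I915_GEM_CONTEXT_DESTROY ioctl */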
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_ID)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}