/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its context
 * S3->S1: context object was retired
 * S3->S2: the client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
D
Damien Lespiau 已提交
76
 *  GPU. The GPU has loaded its state already and has stored away the gtt
77 78 79 80 81 82 83 84 85 86 87
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

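/*
 * Tear down a PPGTT address space. If the VM is already idle (or it is the
 * aliasing PPGTT), its drm_mm can be cleaned up directly; otherwise any
 * remaining VMAs are evicted first so nothing is still bound when the
 * address space goes away.
 */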
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}

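/* kref release callback: tear down and free the PPGTT on last unreference */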
static void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	do_ppgtt_cleanup(ppgtt);
	kfree(ppgtt);
}

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

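/* Query the hardware for the size of the context image on this platform */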
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

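/*
 * Final unreference of a context: release the backing object, drop the
 * PPGTT reference and unlink the context from the global context list.
 */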
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						   typeof(*ctx), ref);
	struct i915_hw_ppgtt *ppgtt = NULL;

	if (ctx->obj) {
		/* We refcount even the aliasing PPGTT to keep the code symmetric */
		if (USES_PPGTT(ctx->obj->base.dev))
			ppgtt = ctx_to_ppgtt(ctx);

		/* XXX: Free up the object before tearing down the address space, in
		 * case we're bound in the PPGTT */
		drm_gem_object_unreference(&ctx->obj->base);
	}

	if (ppgtt)
		kref_put(&ppgtt->ref, ppgtt_release);
	list_del(&ctx->link);
	kfree(ctx);
}

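/* Allocate the GEM object that backs a hardware context image */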
static struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

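/* Create a fresh PPGTT address space to be owned by @ctx */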
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_init_ppgtt(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->ctx = ctx;
	return ppgtt;
}

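/*
 * Allocate and initialise a new context: the refcounted struct itself, an
 * optional backing object, an idr handle in the opening file (the default
 * context has none) and the initial L3 remap mask.
 */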
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->obj = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_ID;

	ctx->file_priv = file_priv;
	ctx->id = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv,
			bool create_vm)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->obj) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->obj,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (create_vm) {
		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		} else
			ctx->vm = &ppgtt->base;

		/* This case is reserved for the global default context and
		 * should only happen once. */
		if (is_global_default_ctx) {
			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
				ret = -EEXIST;
				goto err_unpin;
			}

			dev_priv->mm.aliasing_ppgtt = ppgtt;
		}
	} else if (USES_PPGTT(dev)) {
		/* For platforms which only have aliasing PPGTT, we fake the
		 * address space and refcounting. */
		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
	} else
		ctx->vm = &dev_priv->gtt.base;

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->obj)
		i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

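/*
 * After a GPU hang, make each ring forget its last context so the hardware
 * does not try to restore the (possibly corrupted) image on the next
 * switch; a fake switch to the default context is recorded instead.
 */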
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *dctx = ring->default_context;

		/* Do a fake switch to the default context */
		if (ring->last_context == dctx)
			continue;

		if (!ring->last_context)
			continue;

		if (dctx->obj && i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->obj->base.write_domain = 0;
			dctx->obj->active = 0;
		}

		if (ring->last_context->obj && i == RCS)
			i915_gem_object_ggtt_unpin(ring->last_context->obj);

		i915_gem_context_unreference(ring->last_context);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

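/*
 * One-time driver init: size the HW context image (falling back to "fake"
 * contexts if the reported size is unusable) and create the global default
 * context shared by all rings.
 */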
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	/* NB: RCS will hold a ref for all rings */
	for (i = 0; i < I915_NUM_RINGS; i++)
		dev_priv->ring[i].default_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

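/*
 * Driver teardown: undo the default-context setup from init, including the
 * extra pin and reference held on behalf of the hardware.
 */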
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->obj) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->obj->active);
			i915_gem_object_ggtt_unpin(dctx->obj);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->obj);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

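/*
 * Post-init/resume enabling: turn on the aliasing PPGTT if present, then
 * switch every ring onto its default context.
 */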
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	/* This is the only place the aliasing PPGTT gets enabled, which means
	 * it has to happen before we bail on reset */
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->enable(ppgtt);
	}

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

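/* idr_for_each() callback: drop the file's reference on one context */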
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

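/* Per-file open: initialise the context idr and create the file's context */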
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

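/* Per-file close: release every context still owned by this file */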
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

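/* Look up a context by user-visible id; no new reference is taken */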
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

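/*
 * Emit the MI_SET_CONTEXT command that performs the actual switch on the
 * render ring, bracketed by the arbitration and TLB-invalidation
 * workarounds the individual gens require.
 */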
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

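/*
 * The heart of the context switch: pin the new context, switch the PPGTT
 * if needed, emit MI_SET_CONTEXT, re-do pending L3 remaps, and move the
 * old context image onto the active list so it stays resident until the
 * switch has actually happened on the GPU.
 */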
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->obj == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->obj,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (USES_FULL_PPGTT(ring->dev)) {
		ret = ppgtt->switch_mm(ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret)
		goto unpin_out;

	if (!to->obj->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
	}

	if (!to->is_initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->is_initialized && from == NULL;
	to->is_initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		ret = i915_gem_render_state_init(ring);
		if (ret)
			DRM_ERROR("init render state: %d\n", ret);
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->obj);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->obj == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool hw_context_enabled(struct drm_device *dev)
{
	return to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!hw_context_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_ID)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}
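
/*
 * Illustrative userspace usage of the two ioctls above (a sketch only,
 * assuming libdrm's drmIoctl() wrapper and an already-open DRM fd; error
 * handling trimmed):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... submit execbufs against create.ctx_id, e.g. by setting it in the
 *	    execbuffer2 rsvd1 field via i915_execbuffer2_set_context_id() ...
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */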