/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

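/* Query the hardware for the size (in bytes) of the render context image for this gen. */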
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

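/*
 * Final kref release callback: frees the execlists backing state (if any),
 * drops the ppgtt and the legacy render context object, and unlinks the
 * context from the global context list.
 */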
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

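/*
 * Allocate the GEM object backing a context image. On gen7+ (except VLV) the
 * object is moved to the L3+LLC cache level so the context also uses L3.
 */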
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

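/*
 * Allocate and initialise a new intel_context: take the initial reference,
 * allocate the legacy backing object if HW contexts are in use, and register
 * a handle in the file's context_idr (the default context has no file_priv
 * and uses DEFAULT_CONTEXT_HANDLE).
 */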
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

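/*
 * Called from the GPU reset path: reset each logical ring context in
 * execlists mode, or drop every ring's last_context reference (and the RCS
 * pin) in legacy mode.
 */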
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

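/*
 * One-time setup: choose between execlists, legacy HW contexts or fake
 * (software-only) contexts, size the context image, and create the global
 * default context shared by all rings.
 */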
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

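/*
 * Teardown counterpart of i915_gem_context_init(): unpin and release the
 * default context and drop any lingering last_context references.
 */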
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

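/*
 * Bring the default context on the request's ring into a valid state: in
 * execlists mode run the ring's init_context hook (if any), otherwise
 * perform a full legacy context switch.
 */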
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (i915.enable_execlists) {
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

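/* Per-file open: initialise the context idr and create the file's own default context. */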
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

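/* Look up a context by user handle in the file's idr; returns ERR_PTR(-ENOENT) if absent. */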
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

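/*
 * Emit MI_SET_CONTEXT (with the surrounding MI_ARB_ON_OFF / PSMI workarounds)
 * so the render ring saves the current context and loads req->ctx.
 */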
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

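/*
 * The switch can be skipped when staying on the same context with clean page
 * directories for this ring, provided no L3 slice remap is pending.
 */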
static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

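/*
 * Legacy (ringbuffer) context switch: pin the target context image, emit the
 * page-directory loads and MI_SET_CONTEXT as needed, then move the previous
 * context's image to the active list and drop its pin and reference.
 */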
static int do_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(req);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented on
 * create and decremented on destroy. If the context is in use by the GPU, it
 * will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (req->ctx != ring->last_context) {
			i915_gem_context_reference(req->ctx);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = req->ctx;
		}
		return 0;
	}

	return do_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}