/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context even though it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

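/*
 * Look up how much space the hardware needs for a saved context image on this
 * generation: gen6/7 report it via the CXT_SIZE registers (in 64-byte units),
 * while Haswell and gen8 use fixed totals.
 */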
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

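/*
 * Unbind any VMAs still left on this context's ppgtt inactive list; this is a
 * no-op for contexts without a private ppgtt.
 */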
static void i915_gem_context_clean(struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 vm_link) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

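/*
 * Allocate a unique hardware context id. If the ida is exhausted, retire
 * pending requests in the hope of releasing stale contexts, then retry once.
 */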
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv->dev);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

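/*
 * Allocate and initialise a new intel_context: assign a hw id, link it into
 * the global context list, optionally allocate the legacy backing object and
 * a per-file handle, and set the default L3 remap mask and ban period.
 */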
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

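/*
 * Release the hold an engine had on its last context: via the execlists path,
 * or for legacy mode by unpinning the RCS state object and dropping the
 * context reference.
 */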
static void i915_gem_context_unpin(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(ctx);
	}
}

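/*
 * After a GPU reset, reset each context's logical ring state (execlists only)
 * and drop every engine's last_context via i915_gem_context_lost().
 */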
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link)
			intel_lr_context_reset(dev_priv, ctx);
	}

	i915_gem_context_lost(dev_priv);
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

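/*
 * Drop every engine's reference to its last_context and mark the kernel
 * context as needing full reinitialisation (state restore inhibited, all L3
 * slices remapped) on next use.
 */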
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		i915_gem_context_unpin(engine->last_context, engine);
		engine->last_context = NULL;
	}

	/* Force the GPU state to be reinitialised on enabling */
	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->kernel_context;

	i915_gem_context_lost(dev_priv);

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev, ALL_ENGINES);

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	i915_gem_context_unreference(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

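/* idr_for_each() callback: drop the file's reference on one context. */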
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

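/*
 * Called when a client opens the device: set up the per-file context idr and
 * create that file's default context.
 */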
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}
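/*
 * Emit MI_SET_CONTEXT (wrapped in the documented MI_ARB/PSMI workarounds) on
 * the render ring to switch the hardware to the context backing req->ctx.
 */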

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(engine->dev) ?
		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(engine->dev)) {
		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(engine->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);


	len = 4;
	if (INTEL_INFO(engine->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(engine->dev)->gen >= 7) {
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, to_i915(engine->dev)) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(engine,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(engine,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_SET_CONTEXT);
	intel_ring_emit(engine,
			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(engine, MI_NOOP);

	if (INTEL_INFO(engine->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, to_i915(engine->dev)) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(engine, last_reg);
				intel_ring_emit(engine,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(engine,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(engine, last_reg);
			intel_ring_emit(engine, engine->scratch.gtt_offset);
			intel_ring_emit(engine, MI_NOOP);
		}
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(engine);

	return ret;
}

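/*
 * Emit MI_LOAD_REGISTER_IMM commands to reload the saved L3 remapping
 * registers for one slice; a no-op if no remap information was captured.
 */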
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_engine_cs *engine = req->engine;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
		intel_ring_emit(engine, remap_info[i]);
	}
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

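/*
 * A render context switch can be skipped entirely if no L3 remap is pending,
 * the context is already initialised, the page directories are clean for this
 * engine and the context is already current on it.
 */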
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->legacy_hw_ctx.initialized)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct intel_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_INFO(engine->dev)->gen < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct intel_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

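/*
 * Legacy (ringbuffer submission) render context switch: pin the target
 * context, load the ppgtt if required, emit MI_SET_CONTEXT, retire the old
 * context, then handle any deferred L3 remapping and first-time init.
 */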
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct intel_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
				    get_context_alignment(engine->dev),
				    0);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that, thanks to write = false in this call and because we do not set
	 * any GPU write domains when putting a context object onto the active
	 * list (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto unpin_out;
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto unpin_out;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}
	i915_gem_context_reference(to);
	engine->last_context = to;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->legacy_hw_ctx.initialized) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->legacy_hw_ctx.initialized = true;
	}

	return 0;

unpin_out:
	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (engine->id != RCS ||
	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
		struct intel_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			i915_gem_context_reference(to);
			if (engine->last_context)
				i915_gem_context_unreference(engine->last_context);
			engine->last_context = to;
		}

		return 0;
	}

	return do_rcs_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}