/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context' is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
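 * A typical sequence, mapped onto the states above (illustrative only): a
 * client creates a context (S0->S1) and submits an execbuf with it (S1->S2);
 * another client's execbuf switches the GPU away (S2->S3); destroying the
 * context while its BO is still active takes S3->S5, and retirement of the
 * BO then drops the final reference (S5->S0).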
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future-proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

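/*
 * Alignment required when pinning the legacy context object into the GGTT:
 * gen6 hardware wants 64 KiB, while gen7+ only needs page alignment (see the
 * pin calls in i915_gem_create_context() and do_rcs_switch() below).
 */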
static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

static void i915_gem_context_clean(struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 vm_link) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which the
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as for the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	/* The default context (file_priv == NULL) never has an idr entry */
	if (file_priv)
		idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

static void i915_gem_context_unpin(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(ctx);
	}
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link)
			intel_lr_context_reset(dev_priv, ctx);
	}

	i915_gem_context_lost(dev_priv);
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		i915_gem_context_unpin(engine->last_context, engine);
		engine->last_context = NULL;
	}

	/* Force the GPU state to be reinitialised on enabling */
	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->kernel_context;

	i915_gem_context_lost(dev_priv);

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev, ALL_ENGINES);

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	i915_gem_context_unreference(dctx);
	dev_priv->kernel_context = NULL;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

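/*
 * Emit the commands that perform the actual legacy context switch:
 * MI_SET_CONTEXT pointing at the target context BO, bracketed on gen7+ by
 * the MI_ARB_ON_OFF and cross-ring PSMI sleep workarounds needed when
 * semaphore signalling is enabled.
 */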
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(engine->dev) ?
		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(engine->dev)) {
		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(engine->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(engine->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(engine->dev)->gen >= 7) {
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, to_i915(engine->dev)) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(engine,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(engine,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_SET_CONTEXT);
	intel_ring_emit(engine,
			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(engine, MI_NOOP);

	if (INTEL_INFO(engine->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, to_i915(engine->dev)) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(engine, last_reg);
				intel_ring_emit(engine,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(engine,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(engine, last_reg);
			intel_ring_emit(engine, engine->scratch.gtt_offset);
			intel_ring_emit(engine, MI_NOOP);
		}
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(engine);

	return ret;
}

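/*
 * Re-emit the L3 remapping registers for one slice via MI_LOAD_REGISTER_IMM;
 * called from do_rcs_switch() for each slice still flagged in
 * ctx->remap_slice.
 */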
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_engine_cs *engine = req->engine;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
		intel_ring_emit(engine, remap_info[i]);
	}
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

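/*
 * The RCS switch is a no-op only if no L3 remap is pending, the context has
 * already been initialised on the hardware, no page directories are dirty
 * for this engine, and the context is already current.
 */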
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->legacy_hw_ctx.initialized)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct intel_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_INFO(engine->dev)->gen < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct intel_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

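/*
 * Full legacy context switch on the render ring: pin the target context,
 * emit any required page-directory load and the MI_SET_CONTEXT itself,
 * transfer the previous context's BO to the active list, then run the
 * deferred first-use setup (L3 remap, engine->init_context()).
 */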
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct intel_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
				    get_context_alignment(engine->dev),
				    0);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context." */
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto unpin_out;
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occurs when this happens. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto unpin_out;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}
	i915_gem_context_reference(to);
	engine->last_context = to;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->legacy_hw_ctx.initialized) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->legacy_hw_ctx.initialized = true;
	}

	return 0;

unpin_out:
	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (engine->id != RCS ||
	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
		struct intel_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			i915_gem_context_reference(to);
			if (engine->last_context)
				i915_gem_context_unreference(engine->last_context);
			engine->last_context = to;
		}

		return 0;
	}

	return do_rcs_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}
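
/*
 * Userspace view (an illustrative sketch, not part of this driver; assumes
 * libdrm's drmIoctl() wrapper and the uapi structs from i915_drm.h):
 *
 *	struct drm_i915_gem_context_create create = {};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... submit execbuffers with create.ctx_id ...
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */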

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}