/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context even though it is still active.
 *
 */
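
/*
 * For orientation only, a rough sketch of how userspace exercises this code
 * (illustrative, not part of this file; exact wrappers depend on the libdrm
 * version in use):
 *
 *	struct drm_i915_gem_context_create create = {};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = { ... };
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 *	struct drm_i915_gem_context_destroy destroy = { .ctx_id = create.ctx_id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */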

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
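
/*
 * ALL_L3_SLICES() above is just a mask with one bit per L3 slice; it is used
 * to flag every slice as needing an L3 remap when a context is created and
 * again once the GPU state has been lost (see __create_hw_context() and
 * i915_gem_context_lost()).
 */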

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
	if (IS_GEN6(dev_priv))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}
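
/*
 * Note that the size computed above need not be page aligned; the caller
 * (i915_gem_context_init()) rounds it up to 4096 before storing it as
 * dev_priv->hw_context_size.
 */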

static void i915_gem_context_clean(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 vm_link) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		i915_gem_object_put(ce->state);
	}

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}
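
/*
 * The hw_id assigned above identifies the context for as long as it exists;
 * it is only returned to context_hw_ida from i915_gem_context_free(). Hence
 * the retry above: retiring requests may free stale contexts and release
 * their ids.
 */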

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	ctx->ggtt_alignment = get_context_alignment(dev_priv);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->engine[RCS].state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

	return ctx;

err_out:
	i915_gem_context_put(ctx);
	return ERR_PTR(ret);
}
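
/*
 * Note that the err_out path above drops the context through its kref, so
 * i915_gem_context_free() undoes everything set up so far (hw_id, link and
 * any RCS state object).
 */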

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev->struct_mutex);

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt =
			i915_ppgtt_create(to_i915(dev), file_priv);

		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			idr_remove(&file_priv->context_idr, ctx->user_handle);
			i915_gem_context_put(ctx);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->execlists_force_single_submission = true;
	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		struct intel_context *ce = &ctx->engine[engine->id];

		if (ce->state)
			i915_gem_object_ggtt_unpin(ce->state);

		i915_gem_context_put(ctx);
	}
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev->struct_mutex);

	if (i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link)
			intel_lr_context_reset(dev_priv, ctx);
	}

	i915_gem_context_lost(dev_priv);
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}
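
/*
 * A simplified view of the three modes reported above:
 *  - "LR":   execlists; per-engine logical ring contexts are allocated on
 *            demand and hw_context_size stays 0.
 *  - "HW":   legacy contexts switched with MI_SET_CONTEXT, using a single
 *            RCS state object of hw_context_size bytes.
 *  - "fake": no hardware context at all; i915_switch_context() then only
 *            switches the ppgtt and the last_context reference.
 */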

void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev->struct_mutex);

	i915_gem_context_put(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915.semaphores ?
		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(dev_priv)) {
		ret = engine->emit_flush(req, EMIT_INVALIDATE);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_GEN(dev_priv) < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);


	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(ring,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(ring, last_reg);
				intel_ring_emit(ring,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(ring,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(ring, last_reg);
			intel_ring_emit(ring, engine->scratch.gtt_offset);
			intel_ring_emit(ring, MI_NOOP);
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}
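
/*
 * For reference, the command sequence emitted above for a gen7+ render
 * switch looks roughly like this (one possible layout):
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	MI_LOAD_REGISTER_IMM	(mask PSMI sleep messages on the other rings)
 *	MI_NOOP
 *	MI_SET_CONTEXT
 *	<ggtt offset of the context state object> | flags
 *	MI_NOOP			(WaMiSetContext_Hang)
 *	MI_LOAD_REGISTER_IMM	(unmask PSMI sleep messages again)
 *	MI_STORE_REGISTER_MEM	(delay before the next switch)
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 */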

static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_ring *ring = req->ring;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
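
/*
 * remap_l3() above replays the saved L3 remapping registers for one slice
 * (GEN7_L3LOG_SIZE/4 register writes); do_rcs_switch() invokes it for every
 * slice flagged in ctx->remap_slice.
 */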

static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct i915_gem_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}
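
/*
 * Roughly speaking: a preceding page directory load is needed whenever a
 * ppgtt is in use and the engine either has no context loaded yet, is
 * switching to a different context or has dirty page directories, is not
 * the render ring, or predates gen8; the gen8 render case is handled via
 * needs_pd_load_post() below.
 */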

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}
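
/*
 * The post-load case only applies to gen8 in legacy ringbuffer mode: when
 * the restore is inhibited (a new or default context) the PDPs are not part
 * of the restored image, so they have to be reloaded after MI_SET_CONTEXT.
 */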

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
				    to->ggtt_alignment,
				    0);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
	if (ret)
		goto unpin_out;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto unpin_out;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto unpin_out;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->engine[RCS].state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;

unpin_out:
	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			if (engine->last_context)
				i915_gem_context_put(engine->last_context);
			engine->last_context = i915_gem_context_get(to);
		}

		return 0;
	}

	return do_rcs_switch(req);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine->last_context == NULL)
			continue;

		if (engine->last_context == dev_priv->kernel_context)
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}
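
/*
 * Callers (the idle and suspend paths, for instance) use the helper above to
 * make the ever-present kernel context the last context pinned on each
 * engine before the GPU is powered down.
 */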

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_put(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size) {
			ret = -EINVAL;
		} else {
			if (args->value)
				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
			else
				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}