/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
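
/*
 * For reference, a rough userspace-side sketch (not part of this driver) of
 * how a client exercises the context ioctls implemented at the bottom of this
 * file. It assumes libdrm's drmIoctl() helper and the uapi structures from
 * <drm/i915_drm.h>; error handling is elided.
 *
 *	static uint32_t create_ctx(int fd)
 *	{
 *		struct drm_i915_gem_context_create create = { 0 };
 *
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *		return create.ctx_id;
 *	}
 *
 *	static void destroy_ctx(int fd, uint32_t ctx_id)
 *	{
 *		struct drm_i915_gem_context_destroy destroy = { .ctx_id = ctx_id };
 *
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 *
 * The returned ctx_id is then placed in drm_i915_gem_execbuffer2.rsvd1 (see
 * i915_execbuffer2_set_context_id()) so that subsequent execbufs run in that
 * context rather than in the default one.
 */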

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
	if (IS_GEN6(dev_priv))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

static void i915_gem_context_clean(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 vm_link) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

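/*
 * Final kref release callback for a context: removes any remaining VMAs,
 * drops the ppgtt reference, releases the per-engine state objects and
 * ringbuffers, unlinks the context from the device list and returns its
 * hw_id to the ida before freeing the structure.
 */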
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ringbuf)
			intel_ringbuffer_free(ce->ringbuf);

		i915_gem_object_put(ce->state);
	}

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	ctx->ggtt_alignment = get_context_alignment(dev_priv);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->engine[RCS].state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

	return ctx;

err_out:
	i915_gem_context_put(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev->struct_mutex);

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			idr_remove(&file_priv->context_idr, ctx->user_handle);
			i915_gem_context_put(ctx);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device pointer
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->execlists_force_single_submission = true;
	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		struct intel_context *ce = &ctx->engine[engine->id];

		if (ce->state)
			i915_gem_object_ggtt_unpin(ce->state);

		i915_gem_context_put(ctx);
	}
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev->struct_mutex);

	if (i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link)
			intel_lr_context_reset(dev_priv, ctx);
	}

	i915_gem_context_lost(dev_priv);
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

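/*
 * Called when the hardware context state has been lost (e.g. across a GPU
 * reset): drop each engine's last_context reference and, in legacy
 * ringbuffer mode, mark the default contexts as uninitialised and in need of
 * an L3 remap so that their state is fully restored on the next switch.
 */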
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev->struct_mutex);

	i915_gem_context_put(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

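/*
 * Emit the MI_SET_CONTEXT command that performs the actual HW context switch
 * on the render ring, bracketed by the MI_ARB_ON_OFF and PSMI sleep-message
 * workarounds that gen7+ requires when semaphore signalling from other rings
 * is in use.
 */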
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(dev_priv) ?
		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(dev_priv)) {
		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_GEN(dev_priv) < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);


	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(engine,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(engine,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_SET_CONTEXT);
	intel_ring_emit(engine,
			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(engine, MI_NOOP);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(engine, last_reg);
				intel_ring_emit(engine,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(engine,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(engine, last_reg);
			intel_ring_emit(engine, engine->scratch.gtt_offset);
			intel_ring_emit(engine, MI_NOOP);
		}
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(engine);

	return ret;
}

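/*
 * Re-emit the saved L3 remapping registers (GEN7_L3LOG) for one slice via
 * MI_LOAD_REGISTER_IMM so the newly restored context sees the correct L3
 * mapping.
 */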
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_engine_cs *engine = req->engine;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
		intel_ring_emit(engine, remap_info[i]);
	}
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

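/*
 * A render context switch is a no-op when no L3 remap is pending, the
 * context has already been initialised, the ppgtt page directories are clean
 * for this engine and the context is already current on it.
 */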
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct i915_gem_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

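/*
 * Legacy (ringbuffer submission) context switch for the render engine: pin
 * the target context image, load the page directories if required, emit
 * MI_SET_CONTEXT, retire the previous context image via the active list, do
 * any pending L3 remapping and run engine->init_context() on first use.
 */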
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
				    to->ggtt_alignment,
				    0);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
	if (ret)
		goto unpin_out;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto unpin_out;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto unpin_out;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->engine[RCS].state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;

unpin_out:
	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	WARN_ON(i915.enable_execlists);
	lockdep_assert_held(&req->i915->drm.struct_mutex);

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			if (engine->last_context)
				i915_gem_context_put(engine->last_context);
			engine->last_context = i915_gem_context_get(to);
		}

		return 0;
	}

	return do_rcs_switch(req);
}

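/*
 * Queue a request on every engine that is not already idle in the kernel
 * context, switching it back to the kernel context so that user contexts can
 * subsequently be retired and unpinned (used e.g. when idling the GPU).
 */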
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine->last_context == NULL)
			continue;

		if (engine->last_context == dev_priv->kernel_context)
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = 0;
		if (!i915.enable_execlists)
			ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_put(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size) {
			ret = -EINVAL;
		} else {
			if (args->value)
				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
			else
				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}