/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, invoking a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: the client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
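
/*
 * Illustration only, not part of the driver: userspace exercises the context
 * lifecycle described above through the CONTEXT_CREATE and CONTEXT_DESTROY
 * ioctls implemented at the bottom of this file. A minimal sketch, assuming an
 * already-open DRM fd and libdrm's drmIoctl() wrapper (error handling omitted):
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	(create.ctx_id is the handle the client then passes back on each
 *	 execbuf, e.g. via i915_execbuffer2_set_context_id())
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */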

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
	if (IS_GEN6(dev_priv))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!ctx->closed);

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);
	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

static struct drm_i915_gem_object *
alloc_context_obj(struct drm_device *dev, u64 size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(to_i915(dev))) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

static void context_close(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(ctx->closed);
	ctx->closed = true;
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);
	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	ctx->ggtt_alignment = get_context_alignment(dev_priv);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	ctx->bannable = true;
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev->struct_mutex);

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			idr_remove(&file_priv->context_idr, ctx->user_handle);
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->execlists_force_single_submission = true;
	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		struct intel_context *ce = &ctx->engine[engine->id];

		if (ce->state)
			i915_vma_unpin(ce->state);

		i915_gem_context_put(ctx);
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv, id)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev->struct_mutex);

	context_close(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	enum intel_engine_id id;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915.semaphores ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(dev_priv)) {
		ret = engine->emit_flush(req, EMIT_INVALIDATE);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_GEN(dev_priv) < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(ring,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(ring,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(ring, last_reg);
				intel_ring_emit(ring,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(ring,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(ring, last_reg);
			intel_ring_emit(ring,
					i915_ggtt_offset(engine->scratch));
			intel_ring_emit(ring, MI_NOOP);
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
	struct intel_ring *ring = req->ring;
	int i, ret;

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
		  struct intel_engine_cs *engine,
		  struct i915_gem_context *to)
{
	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!engine->last_context)
		return true;

	/* Same context without new entries, skip */
	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

struct i915_vma *
i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
			    unsigned int flags)
{
	struct i915_vma *vma = ctx->engine[RCS].state;
	int ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_vma *vma;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	vma = i915_gem_context_pin_legacy(to, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			goto err;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto err;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
		/* state is kept alive until the next request */
		i915_vma_unpin(from->engine[RCS].state);
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;

err:
	i915_vma_unpin(vma);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			if (engine->last_context)
				i915_gem_context_put(engine->last_context);
			engine->last_context = i915_gem_context_get(to);
		}

		return 0;
	}

	return do_rcs_switch(req);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_timeline *timeline;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;
		int ret;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Queue this switch after all other activity */
		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
			struct drm_i915_gem_request *prev;
			struct intel_timeline *tl;

			tl = &timeline->engine[engine->id];
			prev = i915_gem_active_raw(&tl->last_request,
						   &dev_priv->drm.struct_mutex);
			if (prev)
				i915_sw_fence_await_sw_fence_gfp(&req->submit,
								 &prev->submit,
								 GFP_KERNEL);
		}

		ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = ctx->bannable;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size) {
			ret = -EINVAL;
		} else {
			if (args->value)
				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
			else
				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
		}
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else
			ctx->bannable = args->value;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = ctx->guilty_count;
	args->batch_pending = ctx->active_count;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}