/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: a client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible for a context to be destroyed while it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to);

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

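/*
 * Query the hardware for the size of the context image on this gen. The
 * CXT_SIZE registers report the size in 64-byte units on gen6/7, while
 * Haswell and gen8 use fixed totals.
 */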
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_hw_context *ctx = container_of(ctx_ref,
						   typeof(*ctx), ref);

	list_del(&ctx->link);
	drm_gem_object_unreference(&ctx->obj->base);
	kfree(ctx);
}

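/*
 * Allocate a new context and its backing GEM object of
 * dev_priv->hw_context_size bytes. When file_priv is non-NULL the context is
 * also allocated an id in that file's context_idr; the default context passes
 * NULL and is later recognized by its lack of a file_priv.
 */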
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	INIT_LIST_HEAD(&ctx->link);
	if (ctx->obj == NULL) {
		kfree(ctx);
		DRM_DEBUG_DRIVER("Context object allocation failed\n");
		return ERR_PTR(-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 7) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret))
			goto err_out;
	}

	list_add_tail(&ctx->link, &dev_priv->context_list);

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
			GFP_KERNEL);
	if (ret < 0)
		goto err_out;

	ctx->file_priv = file_priv;
	ctx->id = ret;

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

static inline bool is_default_context(struct i915_hw_context *ctx)
{
	/* Cheap trick to determine default contexts */
	return ctx->file_priv ? false : true;
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_hw_context *
create_default_context(struct drm_device *dev)
{
	struct i915_hw_context *ctx;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = create_hw_context(dev, NULL);
	if (IS_ERR(ctx))
		return ctx;

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	ret = i915_gem_obj_ggtt_pin(ctx->obj, get_context_alignment(dev),
				    false, false);
	if (ret) {
		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
		goto err_destroy;
	}

	DRM_DEBUG_DRIVER("Default HW context loaded\n");
	return ctx;

err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

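/*
 * After a GPU hang, re-point every ring's last_context at the default
 * context so that the hardware does not try to restore the (possibly
 * corrupted) context that was running when the hang occurred.
 */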
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct i915_hw_context *dctx;
		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
			continue;

		/* Do a fake switch to the default context */
		ring = &dev_priv->ring[i];
		dctx = ring->default_context;
		if (WARN_ON(!dctx))
			continue;

		if (!ring->last_context)
			continue;

		if (ring->last_context == dctx)
			continue;

		if (i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
						      get_context_alignment(dev),
						      false, false));
			/* Fake a finish/inactive */
			dctx->obj->base.write_domain = 0;
			dctx->obj->active = 0;
		}

		i915_gem_context_unreference(ring->last_context);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

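/*
 * One-time setup at module load: size the context objects, create the
 * default context on the render ring and share it with the other rings.
 */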
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return 0;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);

	if (dev_priv->hw_context_size > (1<<20)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
		return -E2BIG;
	}

	dev_priv->ring[RCS].default_context = create_default_context(dev);
	if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
				 PTR_ERR(dev_priv->ring[RCS].default_context));
		return PTR_ERR(dev_priv->ring[RCS].default_context);
	}

	for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
			continue;

		ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = dev_priv->ring[RCS].default_context;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
	return 0;
}

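/*
 * Undo i915_gem_context_init() at unload: stop the GPU from referencing the
 * hw context via a reset and drop the references held on the default context.
 */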
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	/* When the default context is created and switched to, the base object
	 * refcount will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to the default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	WARN_ON(!dev_priv->ring[RCS].last_context);
	if (dev_priv->ring[RCS].last_context == dctx) {
		/* Fake switch to NULL context */
		WARN_ON(dctx->obj->active);
		i915_gem_object_ggtt_unpin(dctx->obj);
		i915_gem_context_unreference(dctx);
		dev_priv->ring[RCS].last_context = NULL;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];
		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
			continue;

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_object_ggtt_unpin(dctx->obj);
	i915_gem_context_unreference(dctx);
}

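/*
 * Switch every ring to its default context once the rings are up, so each
 * ring has a valid context loaded before userspace submits any work.
 */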
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int ret, i;

	if (!HAS_HW_CONTEXTS(dev_priv->dev))
		return 0;

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);
	for_each_ring(ring, dev_priv, i) {
		ret = do_switch(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

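/* idr_for_each() callback used on file close to drop each context reference. */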
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_hw_context *ctx = p;

	BUG_ON(id == DEFAULT_CONTEXT_ID);

	i915_gem_context_unreference(ctx);
	return 0;
}

struct i915_ctx_hang_stats *
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;

	if (id == DEFAULT_CONTEXT_ID)
		return &file_priv->hang_stats;

	if (!HAS_HW_CONTEXTS(dev))
		return ERR_PTR(-ENOENT);

	ctx = i915_gem_context_get(file->driver_priv, id);
	if (ctx == NULL)
		return ERR_PTR(-ENOENT);

	return &ctx->hang_stats;
}

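/* Called on drm open: initialize the per-file idr that hands out context ids. */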
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (!HAS_HW_CONTEXTS(dev))
		return 0;

	idr_init(&file_priv->context_idr);

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
	mutex_unlock(&dev->struct_mutex);
}

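/* Look up a context by id in the file's idr; returns NULL if none exists. */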
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
}

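/*
 * Emit the MI_SET_CONTEXT command that tells the render ring to save the
 * current context and load new_context, along with the gen6/gen7
 * workarounds that must surround it.
 */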
static inline int
mi_set_context(struct intel_ring_buffer *ring,
	       struct i915_hw_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
	intel_ring_emit(ring, MI_NOOP);

	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

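/*
 * The core of a context switch. On the render ring this pins the target
 * context, emits MI_SET_CONTEXT and moves the previous context's backing
 * object to the active list; on other rings only the kernel's bookkeeping
 * (last_context) is updated.
 */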
static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *from = ring->last_context;
	u32 hw_flags = 0;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->obj == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
	}

	if (from == to && from->last_ring == ring && !to->remap_slice)
		return 0;

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	ret = i915_gem_obj_ggtt_pin(to->obj, get_context_alignment(ring->dev),
				    false, false);
	if (ret)
		return ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 * XXX: We need a real interface to do this instead of trickery. */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_ggtt_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
	}

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_ggtt_unpin(to->obj);
		return ret;
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

done:
	i915_gem_context_reference(to);
	ring->last_context = to;
	to->is_initialized = true;
	to->last_ring = ring;

	return 0;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: file associated with the context, may be NULL
 * @to_id: context id number
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *to;

	if (!HAS_HW_CONTEXTS(ring->dev))
		return 0;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		if (file == NULL)
			return -EINVAL;

		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	return do_switch(ring, to);
}

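/*
 * Userspace entry point for DRM_IOCTL_I915_GEM_CONTEXT_CREATE. A minimal
 * caller (illustrative only, via libdrm) looks like:
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	// create.ctx_id now names the new context
 */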
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (!HAS_HW_CONTEXTS(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = create_hw_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

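/*
 * Userspace entry point for DRM_IOCTL_I915_GEM_CONTEXT_DESTROY: takes the
 * ctx_id returned by create and drops the file's reference to that context.
 */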
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (!ctx) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}