/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf with this context
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
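
/*
 * Illustrative sketch (not part of this file): the life cycle above, as seen
 * from userspace through the context ioctls. The names below come from the
 * uapi in include/uapi/drm/i915_drm.h; error handling and execbuffer setup
 * are omitted for brevity.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *	struct drm_i915_gem_execbuffer2 execbuf = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);    S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);      S1->S2
 *
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);  S2->S4
 */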

#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
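/*
 * e.g. with NUM_L3_SLICES() == 2 this evaluates to (1 << 2) - 1 = 0x3,
 * i.e. one set bit per L3 slice; see the use for ctx->remap_slice below.
 */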

static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
	kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
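	/*
	 * Note: llist_add() returns true only if the list was previously
	 * empty, so the free worker is queued exactly once per batch of
	 * freed contexts rather than once per context.
	 */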
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->contexts.hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->priority = I915_PRIORITY_NORMAL;

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (USES_GUC(dev_priv))
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = __create_hw_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	i915_gem_context_clear_bannable(ctx);
	ctx->priority = prio;
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
	return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);

	INIT_LIST_HEAD(&dev_priv->contexts.list);
	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
	init_llist_head(&dev_priv->contexts.free_list);

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->contexts.hw_ida);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		return PTR_ERR(ctx);
	}
	/*
	 * For easy recognisability, we want the kernel context to be 0 and
	 * then all user contexts will have non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	if (needs_preempt_context(dev_priv)) {
		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
		if (!IS_ERR(ctx))
			dev_priv->preempt_context = ctx;
		else
			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
	return 0;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->legacy_active_context = NULL;
		engine->legacy_active_ppgtt = NULL;

		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		engine->last_retired_context = NULL;
	}
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (i915->preempt_context)
		destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_gem_timeline *timeline;

	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
		struct intel_timeline *tl;

		if (timeline == &engine->i915->gt.global_timeline)
			continue;

		tl = &timeline->engine[engine->id];
		if (i915_gem_active_peek(&tl->last_request,
					 &engine->i915->drm.struct_mutex))
			return false;
	}

	return intel_engine_has_kernel_context(engine);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_timeline *timeline;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;

		if (engine_has_idle_kernel_context(engine))
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Queue this switch after all other activity */
		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
			struct drm_i915_gem_request *prev;
			struct intel_timeline *tl;

			tl = &timeline->engine[engine->id];
			prev = i915_gem_active_raw(&tl->last_request,
						   &dev_priv->drm.struct_mutex);
			if (prev)
				i915_sw_fence_await_sw_fence_gfp(&req->submit,
								 &prev->submit,
								 I915_FENCE_GFP);
		}

		/*
		 * Force a flush after the switch to ensure that all rendering
		 * and operations prior to switching to the kernel context hit
		 * memory. This should be guaranteed by the previous request,
		 * but an extra layer of paranoia before we declare the system
		 * idle (on suspend etc) is advisable!
		 */
		__i915_add_request(req, true);
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!dev_priv->engine[RCS]->context_size)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->value = ctx->priority;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		{
			s64 priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->priority = priority;
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}
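
/*
 * Illustrative sketch (not part of this file): querying and tuning a context
 * from userspace via the parameter ioctls above. Error handling is omitted.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	// p.value now holds the total GTT size visible to this context
 *
 *	p.param = I915_CONTEXT_PARAM_PRIORITY;
 *	p.value = I915_CONTEXT_MAX_USER_PRIORITY;	// needs CAP_SYS_NICE
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */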

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */
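	/*
	 * A minimal sketch of that hypothetical seqlock protection (not
	 * implemented here): writers in the hang handler would take
	 * write_seqlock(&ctx->hangstats_lock) around their updates, and
	 * this read side would become:
	 *
	 *	unsigned int seq;
	 *	do {
	 *		seq = read_seqbegin(&ctx->hangstats_lock);
	 *		args->batch_active = atomic_read(&ctx->guilty_count);
	 *		args->batch_pending = atomic_read(&ctx->active_count);
	 *	} while (read_seqretry(&ctx->hangstats_lock, seq));
	 *
	 * ctx->hangstats_lock is hypothetical; no such field exists today.
	 */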

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
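
/*
 * Illustrative sketch (not part of this file): userspace checking whether its
 * context has been blamed for a GPU hang. recreate_context() stands in for
 * whatever recovery the application performs; error handling is omitted.
 *
 *	struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs);
 *	if (rs.batch_active)	// our batch was executing when a hang hit
 *		recreate_context();
 */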

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif