/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

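/*
 * Lazily allocate the backend state for this context (ce->ops->alloc) the
 * first time it is needed. Serialised against concurrent pinners by
 * ce->pin_mutex; a banned context is refused with -EIO.
 */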
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

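/*
 * Mark the context as active for the duration of the pin. For anything other
 * than a barrier context, also preallocate the engine barrier nodes that
 * intel_context_active_release() will later consume via
 * i915_active_acquire_barrier().
 */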
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

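/*
 * Pin the context state into the GGTT above the backend's pin bias and keep
 * its vma active. The object is marked unshrinkable so that the shrinker
 * leaves it resident until __context_unpin_state().
 */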
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

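/* Pin the ring into the GGTT and keep its backing vma marked as active. */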
static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}

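/*
 * Acquire the ring, timeline and (if present) context state before
 * ce->pin_mutex is taken. All backing objects must already be locked
 * under @ww by the caller.
 */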
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}

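/*
 * Pin the context under an existing ww transaction: all backing objects are
 * locked and pre-pinned up front so that nothing under ce->pin_mutex needs
 * to take dma_resv locks. The first pin also calls ce->ops->pin() to
 * activate the context.
 */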
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (unlikely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}

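/* Convenience wrapper that runs its own ww transaction and backoff loop. */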
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

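/*
 * Drop a pin reference. The final unpin releases the backend state and the
 * active reference taken on first pin, which may retire asynchronously.
 */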
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

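/*
 * Called once the last active reference is retired: drop the pins taken in
 * __intel_context_active() and release the context reference.
 */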
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}

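/*
 * First activation of the context: take extra pin references on the ring,
 * timeline and state that intel_context_pre_pin() has already pinned, and
 * hold a reference to the context itself until retirement.
 */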
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}

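/*
 * Common one-time setup for a newly allocated context: bind it to the
 * engine, take a reference on the engine's GT address space and initialise
 * the active tracker. ce->ring initially only encodes the default ring
 * size (SZ_4K).
 */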
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

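/* Release the references taken in intel_context_init(). */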
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

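/*
 * i915_globals hooks for the intel_context slab: shrink it under memory
 * pressure and destroy it on module unload.
 */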
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

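/*
 * Order a request from another context after the current activity on this
 * context's timeline, and track it in ce->active so the context image and
 * timeline stay pinned until that request is retired.
 */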
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting up the ce
	 * activity tracker.
	 *
	 * We only need to take one pin on account of this; in other words,
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

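/*
 * Helper to pin the context just long enough to allocate a request on its
 * timeline, handling ww backoff internally.
 */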
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hack around this to shut up lockdep in selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif