/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

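/*
 * Allocate the backing state for the context on first use. The allocation
 * is serialised by ce->pin_mutex and recorded with CONTEXT_ALLOC_BIT so
 * that subsequent callers skip straight past it.
 */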
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

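/*
 * Take the first reference on ce->active and, except for barrier
 * contexts, preallocate the tracking nodes that
 * intel_context_active_release() will later turn into engine barriers.
 */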
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}

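/*
 * Acquire the pin and active references on the ring, timeline and (if
 * present) context state; the backing objects are expected to already be
 * locked via the ww context. intel_context_post_unpin() is the exact
 * reverse.
 */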
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}

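/*
 * Pin the context for active use. The backing objects (HWSP, ring and
 * context state) are locked and pre-pinned under the caller's ww acquire
 * context before ce->pin_mutex is taken. The first pinner performs
 * ops->pin() and hands the pre-pin references over to the context;
 * otherwise (already pinned, or on error) the pre-pin work is unwound
 * again before returning.
 */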
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);
	return err;
}

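/*
 * Convenience wrapper around __intel_context_do_pin_ww() that supplies
 * its own ww acquire context and transparently retries after an
 * -EDEADLK backoff.
 */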
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

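/*
 * ce->active callbacks: __intel_context_active() runs on the first
 * acquire (from intel_context_active_acquire()) and takes extra pin
 * references on the ring, timeline and state that persist while requests
 * are in flight; __intel_context_retire() drops them again once the last
 * active reference is released.
 */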
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}

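/*
 * One-time initialisation of an intel_context: grab a reference on the
 * GT's address space, record the default SZ_4K ring size and wire up
 * the ce->active callbacks above.
 */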
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * We only need to take one pin on their account; in other words,
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

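/*
 * Convenience helper that pins the context just long enough to create a
 * request on it and then drops the temporary pin, returning the request
 * (or an ERR_PTR) to the caller.
 */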
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif