/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

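/* Global slab cache from which all struct intel_context are allocated. */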
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

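/*
 * Allocate a fresh intel_context and initialise it for the (ctx, engine)
 * pair. The context starts out unpinned; callers must pin it before use.
 */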
struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

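/*
 * Slow path behind intel_context_pin(): serialise concurrent pin/unpin
 * with ce->pin_mutex. Only the first pin does the real work through
 * ce->ops->pin(), holding a runtime PM wakeref while the backing state
 * is set up; subsequent pins just bump the pin count.
 */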
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		err = 0;
		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

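/*
 * Release one pin. atomic_add_unless() lets us decrement without the
 * mutex while other pins remain; only the final unpin takes pin_mutex
 * (nested, as we may already be inside another context's pin when
 * evicting) and releases the backing state.
 */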
void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

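/*
 * Pin the context state into the GGTT and mark it globally pinned so
 * the shrinker leaves it alone until the context is retired.
 */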
static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
{
	int err;

	err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	vma->obj->pin_global++;
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	vma->obj->pin_global--;
	__i915_vma_unpin(vma);
}

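/*
 * i915_active retirement callback: the last request using this context
 * has been retired, so undo the pins taken in
 * intel_context_active_acquire() and drop the associated reference.
 */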
static void intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	if (ce->state)
		__context_unpin_state(ce->state);

	intel_ring_unpin(ce->ring);
	intel_context_put(ce);
}

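/* One-time initialisation of a freshly allocated intel_context. */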
void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(ctx->i915, &ce->active, intel_context_retire);
}

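/*
 * Called when the context first becomes active: pin the ring and state
 * so they stay resident while requests are in flight, and preallocate
 * the barrier nodes used when the context idles again (not needed for
 * the kernel context itself).
 */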
int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
{
	int err;

	if (!i915_active_acquire(&ce->active))
		return 0;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, flags);
	if (err)
		goto err_ring;

	/* Preallocate tracking nodes */
	if (!i915_gem_context_is_kernel(ce->gem_context)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err)
			goto err_state;
	}

	return 0;

err_state:
	__context_unpin_state(ce->state);
err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	i915_active_cancel(&ce->active);
	return err;
}

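/* Counterpart to intel_context_active_acquire(), called on final unpin. */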
void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

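/* i915_globals hooks to trim or destroy the slab with the rest of i915. */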
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

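/*
 * Keep the engine awake (via engine-pm) for as long as this context has
 * requests in flight on it.
 */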
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_engine_pm_put(ce->engine);
}

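/*
 * Convenience helper: take a temporary pin so that i915_request_create()
 * sees a pinned context; once created, the request itself keeps the
 * context in use until it is retired, so our pin can be dropped at once.
 */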
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}