/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_request base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static void
__active_park(struct i915_active *ref)
{
	struct active_node *it, *n;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		GEM_BUG_ON(i915_active_request_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
	ref->tree = RB_ROOT;
}

static void
__active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!ref->count);
	if (--ref->count)
		return;

	/* return the unused nodes to our slabcache */
	__active_park(ref);

	ref->retire(ref);
}

static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct active_node, base)->ref);
}

static void
last_retire(struct i915_active_request *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct i915_active, last));
}

static struct i915_active_request *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;
	struct i915_request *old;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 *
	 * Note that we allow the timeline to be active simultaneously in
	 * the rbtree and the last cache. We do this to avoid having
	 * to search and replace the rbtree element for a new timeline, with
	 * the cost being that we must be aware that the ref may be retired
	 * twice for the same timeline (as the older rbtree element will be
	 * retired before the new request added to last).
	 */
	old = i915_active_request_raw(&ref->last, BKL(ref));
	if (!old || old->fence.context == idx)
		goto out;

	/* Move the currently active fence into the rbtree */
	idx = old->fence.context;

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto replace;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);

	/* kmalloc may retire the ref->last (thanks shrinker)! */
	if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
		kmem_cache_free(global.slab_cache, node);
		goto out;
	}

	if (unlikely(!node))
		return ERR_PTR(-ENOMEM);

	i915_active_request_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

replace:
	/*
	 * Overwrite the previous active slot in the rbtree with last,
	 * leaving last zeroed. If the previous slot is still active,
	 * we must be careful as we now only expect to receive one retire
	 * callback not two, and so must undo the active counting for the
	 * overwritten slot.
	 */
	if (i915_active_request_isset(&node->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&node->base.link);
		ref->count--;
		GEM_BUG_ON(!ref->count);
	}
	GEM_BUG_ON(list_empty(&ref->last.link));
	list_replace_init(&ref->last.link, &node->base.link);
	node->base.request = fetch_and_zero(&ref->last.request);

out:
	return &ref->last;
}

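/*
 * Initialise an i915_active tracker. @retire is invoked once the tracker
 * idles again, i.e. when the last tracked request (or explicit acquire
 * reference) has been retired.
 */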
void i915_active_init(struct drm_i915_private *i915,
		      struct i915_active *ref,
		      void (*retire)(struct i915_active *ref))
{
	ref->i915 = i915;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	i915_active_request_init(&ref->last, NULL, last_retire);
	init_llist_head(&ref->barriers);
	ref->count = 0;
}

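/*
 * Record @rq as the most recent active request for @timeline. If the slot
 * for this timeline was idle, a new active reference is taken; it is
 * dropped again when the request is retired.
 */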
int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_active_request *active;
	int err = 0;

	/* Prevent reaping in case we malloc/wait while building the tree */
	i915_active_acquire(ref);

	active = active_instance(ref, timeline);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto out;
	}

	if (!i915_active_request_isset(active))
		ref->count++;
	__i915_active_request_set(active, rq);

	GEM_BUG_ON(!ref->count);
out:
	i915_active_release(ref);
	return err;
}

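/*
 * Pin the tracker so it cannot be retired while the caller builds up state
 * (e.g. while allocating or waiting). Returns true if the tracker was
 * previously idle. The caller must hold struct_mutex.
 */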
bool i915_active_acquire(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	return !ref->count++;
}

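/* Drop the pin taken by i915_active_acquire(), retiring the tracker if idle. */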
void i915_active_release(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	__active_retire(ref);
}

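/*
 * Wait for all requests tracked by @ref to be retired, flushing both the
 * last-used slot and every node in the rbtree. Returns 0 on success or a
 * negative error code.
 */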
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret = 0;

	if (i915_active_acquire(ref))
		goto out_release;

	ret = i915_active_request_retire(&ref->last, BKL(ref));
	if (ret)
		goto out_release;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = i915_active_request_retire(&it->base, BKL(ref));
		if (ret)
			break;
	}

out_release:
	i915_active_release(ref);
	return ret;
}

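/*
 * Order @rq after the request currently tracked by @active (if any), so
 * that @rq does not begin execution until that request has completed.
 */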
int i915_request_await_active_request(struct i915_request *rq,
				      struct i915_active_request *active)
{
	struct i915_request *barrier =
		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

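/*
 * Order @rq after every request currently tracked by @ref, across all of
 * its timelines.
 */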
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	/* await allocates and so we need to avoid hitting the shrinker */
	if (i915_active_acquire(ref))
		goto out; /* was idle */

	err = i915_request_await_active_request(rq, &ref->last);
	if (err)
		goto out;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_request_await_active_request(rq, &it->base);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	GEM_BUG_ON(i915_active_request_isset(&ref->last));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(ref->count);
}
#endif

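/*
 * Preallocate one barrier node per physical engine in @engine->mask, each
 * keyed to that engine's kernel context timeline and holding both an
 * active reference and an engine-pm reference. The nodes are stashed on
 * ref->barriers until consumed by i915_active_acquire_barrier().
 */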
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct llist_node *pos, *next;
	unsigned long tmp;
	int err;

	GEM_BUG_ON(!engine->mask);
	for_each_engine_masked(engine, i915, engine->mask, tmp) {
		struct intel_context *kctx = engine->kernel_context;
		struct active_node *node;

		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
		if (unlikely(!node)) {
			err = -ENOMEM;
			goto unwind;
		}

		i915_active_request_init(&node->base,
					 (void *)engine, node_retire);
		node->timeline = kctx->ring->timeline->fence_context;
		node->ref = ref;
		ref->count++;

		intel_engine_pm_get(engine);
		llist_add((struct llist_node *)&node->base.link,
			  &ref->barriers);
	}

	return 0;

unwind:
	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct active_node *node;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);
		engine = (void *)rcu_access_pointer(node->base.request);

		intel_engine_pm_put(engine);
		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

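/*
 * Insert the preallocated barrier nodes into the rbtree and hand them over
 * to their engine's barrier_tasks list, to be picked up by the next kernel
 * context request (see i915_request_add_barriers). The engine-pm reference
 * taken during preallocation is dropped here.
 */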
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;

	i915_active_acquire(ref);

	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct intel_engine_cs *engine;
		struct active_node *node;
		struct rb_node **p, *parent;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);

		engine = (void *)rcu_access_pointer(node->base.request);
		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));

		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			parent = *p;
			if (rb_entry(parent,
				     struct active_node,
				     node)->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);

		llist_add((struct llist_node *)&node->base.link,
			  &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
	i915_active_release(ref);
}

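/*
 * Move all barrier tasks queued on @rq's engine onto the request's
 * active_list, so that they are retired (dropping their active references)
 * when this request completes.
 */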
void i915_request_add_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
		list_add_tail((struct list_head *)node, &rq->active_list);
}

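/*
 * Replace the request tracked by @active with @rq, first ordering @rq
 * after the previously tracked request so that retirement order is
 * preserved.
 */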
int i915_active_request_set(struct i915_active_request *active,
			    struct i915_request *rq)
{
	int err;

	/* Must maintain ordering wrt previous active requests */
	err = i915_request_await_active_request(rq, active);
	if (err)
		return err;

	__i915_active_request_set(active, rq);
	return 0;
}

void i915_active_retire_noop(struct i915_active_request *active,
			     struct i915_request *request)
{
	/* Space left intentionally blank */
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}