/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"

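/*
 * The shrinker may be invoked in direct-reclaim context from a path that
 * already holds struct_mutex (e.g. while acquiring backing pages for
 * another object), so use mutex_trylock_recursive() to detect recursion
 * and carry on without reacquiring the lock. We only block waiting for
 * the mutex when asked to shrink the active lists.
 */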
static bool shrinker_lock(struct drm_i915_private *i915,
			  unsigned int flags,
			  bool *unlock)
{
	struct mutex *m = &i915->drm.struct_mutex;

	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		*unlock = false;
		if (flags & I915_SHRINK_ACTIVE &&
		    mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
			*unlock = true;
		return *unlock;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	BUG();
}

static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&i915->drm.struct_mutex);
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

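/*
 * Try to unbind the object and release its pages. The caller must already
 * hold a reference to the object; returns true if the object no longer
 * has any pages attached afterwards.
 */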
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
			      unsigned long shrink)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;

	if (i915_gem_object_unbind(obj, flags) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);

	return !i915_gem_object_has_pages(obj);
}

static void try_to_writeback(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
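		/* fall through - once truncated there is nothing left to write back */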
	case __I915_MADV_PURGED:
		return;
	}

	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
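	/*
	 * Scan the purgeable objects first: their backing store has already
	 * been marked as discardable, so reclaiming it requires no swap or
	 * writeback effort.
	 */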
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(i915, shrink, &unlock))
		return 0;

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so can not be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context.
	 */

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!(shrink & I915_SHRINK_BOUND) &&
			    atomic_read(&obj->bind_count))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			if (unsafe_drop_pages(obj, shrink)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!i915_gem_object_has_pages(obj)) {
					try_to_writeback(obj, shrink);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}

			scanned += obj->base.size >> PAGE_SHIFT;
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	shrinker_unlock(i915, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code that intentionally quiesces the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_ACTIVE);
	}

	return freed;
}

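/*
 * Shrinker "count" callback: report how many pages are sitting on our
 * shrink lists. vmscan uses this estimate to decide how many pages to ask
 * us to scan on the next call to i915_gem_shrinker_scan().
 */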
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is we don't want
	 * the shrinker to fire, until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

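/*
 * Shrinker "scan" callback: attempt to release sc->nr_to_scan pages and
 * report back how many pages were actually scanned and freed.
 */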
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	sc->nr_scanned = 0;

	if (!shrinker_lock(i915, 0, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_WRITEBACK);
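	/*
	 * If that did not satisfy the request and we are kswapd (background
	 * reclaim rather than a latency-sensitive direct-reclaim path), force
	 * the device awake and attack the active lists as well.
	 */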
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	shrinker_unlock(i915, unlock);

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

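/*
 * OOM notifier: called as a last resort before the OOM killer selects a
 * victim. Force the device awake, write back and release everything we
 * can, and report the number of pages freed via @ptr.
 */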
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

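/*
 * vmap purge notifier: called when the kernel is running short of vmalloc
 * address space. Drop objects with vmap mappings and clear cached iomaps
 * (which wrap vmap) to give back virtual address space.
 */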
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;
	bool unlock;

	if (!shrinker_lock(i915, 0, &unlock))
		return NOTIFY_DONE;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&i915->ggtt.vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &i915->ggtt.vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

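		/*
		 * i915_vma_unbind() may sleep and may itself need vm.mutex,
		 * so drop the lock around the call and retake it before
		 * continuing the walk.
		 */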
		mutex_unlock(&i915->ggtt.vm.mutex);
		if (i915_vma_unbind(vma) == 0)
			freed_pages += count;
		mutex_lock(&i915->ggtt.vm.mutex);
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	shrinker_unlock(i915, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
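	/*
	 * Start with a batch of 4096 pages (16MiB with 4KiB pages); the
	 * count callback will adapt this to roughly two objects' worth of
	 * pages as the workload evolves.
	 */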
	i915->mm.shrinker.batch = 4096;
	WARN_ON(register_shrinker(&i915->mm.shrinker));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

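/*
 * Teach lockdep about the shrinker's lock ordering up front: pretend to
 * take @mutex inside both fs_reclaim and struct_mutex so that any future
 * inversion (e.g. allocating while holding @mutex) is reported immediately
 * rather than only on a rare reclaim path.
 */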
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	bool unlock = false;

	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
		mutex_acquire(&i915->drm.struct_mutex.dep_map,
			      I915_MM_NORMAL, 0, _RET_IP_);
		unlock = true;
	}

	fs_reclaim_acquire(GFP_KERNEL);

	/*
	 * As we invariably rely on the struct_mutex within the shrinker,
	 * but have a complicated recursion dance, taint all the mutexes used
	 * within the shrinker with the struct_mutex. For completeness, we
	 * taint with all subclass of struct_mutex, even though we should
	 * only need tainting by I915_MM_NORMAL to catch possible ABBA
	 * deadlocks from using struct_mutex inside @mutex.
	 */
	mutex_acquire(&i915->drm.struct_mutex.dep_map,
		      I915_MM_SHRINKER, 0, _RET_IP_);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, 0, _RET_IP_);

	mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);

	if (unlock)
		mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

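/*
 * Remove the object from the shrinker's lists while its pages are pinned
 * for use, so that the shrinker never considers it; its counterpart,
 * __i915_gem_object_make_shrinkable(), puts it back once the pin is
 * dropped.
 */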
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. The two paths must never cross.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					      struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.purge_list);
}