/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to always be present, even
	 * on systems supporting sparse NUMA ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

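/*
 * Add @item to the list of the NUMA node that backs its memory (and, for a
 * memcg-aware lru, to the sub-list of the memcg the object is charged to).
 * Returns true if the item was added, false if it was already on a list.
 */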
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

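/*
 * Remove @item from whichever list it was added to.  Returns true if the
 * item was removed, false if it was not on a list.
 */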
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

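/*
 * Count the items on one node's list for the given memcg index (-1 selects
 * the global, non-memcg list).  The node lock is held while reading so the
 * result is a consistent snapshot.
 */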
static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

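/*
 * Walk one per-node (and optionally per-memcg) list, calling @isolate on
 * each item under nlru->lock.  The callback may drop the lock (LRU_RETRY,
 * LRU_REMOVED_RETRY), in which case the walk restarts from the list head.
 * *nr_to_walk is decremented for every item visited.
 */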
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{

	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
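			/* fall through */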
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
						isolate, cb_arg, nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

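/*
 * Per-memcg lists: each node carries an array of struct list_lru_one,
 * indexed by memcg_cache_id().  The array is resized under list_lrus_mutex
 * and the new pointer is published under the node's spinlock, so readers in
 * list_lru_from_memcg_idx() cannot race with relocation.
 */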
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* entry i was never allocated, so free only [begin, i) */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/* do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

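/*
 * Called when the memcg cache id space grows: resize the per-memcg arrays of
 * every registered lru, rolling back the ones already resized if any
 * allocation fails.
 */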
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

500
	for_each_node(i)
501 502 503 504 505 506 507 508 509 510 511 512
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

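/*
 * Splice the @src_idx lists of every registered lru onto the corresponding
 * @dst_idx lists, e.g. so that items of an offlined memcg stay on an lru and
 * remain reclaimable.
 */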
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

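/*
 * Initialise @lru: allocate one list_lru_node per possible NUMA node, set up
 * the per-memcg arrays for memcg-aware lrus and register the lru so that it
 * is resized and drained together with the memcg cache id space.
 * memcg_get_cache_ids() keeps memcg_nr_cache_ids stable while we allocate.
 */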
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);