/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

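/*
 * Every lru is kept on this global list so that, when a new kmem cache id
 * is allocated for a memory cgroup, the per-memcg arrays of all
 * memcg-aware lrus can be resized (see memcg_update_all_list_lrus()).
 * The mutex serialises registration against such resizes.
 */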
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

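/*
 * Map an object to the list it should live on: for a memcg-aware lru,
 * objects charged to a cgroup go on that cgroup's list, everything else
 * falls back to the node-global list.
 */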
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

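/*
 * Add @item to the lru for its NUMA node (and memcg, if the lru is memcg
 * aware). Returns true if @item was added, false if it was already on a
 * list, so callers may use it for lazy insertion without tracking list
 * membership themselves.
 */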
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	WARN_ON_ONCE(l->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

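/*
 * Remove @item from the lru it was added to. Returns true if @item was
 * taken off a list, false if it was not on one to begin with.
 */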
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	if (!list_empty(item)) {
		list_del_init(item);
		l->nr_items--;
		WARN_ON_ONCE(l->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

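/*
 * The two helpers below are meant to be called from a walk callback with
 * the node lru lock held: they take @item off its list (or move it to
 * @head) while keeping the per-list item count in sync.
 */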
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	WARN_ON_ONCE(l->nr_items < 0);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

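/*
 * Return the number of items on one per-node (and optionally per-memcg)
 * list. list_lru_count_node() sums the node-global list and, for a
 * memcg-aware lru, every per-memcg list on that node.
 */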
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

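/*
 * Walk up to *nr_to_walk items on one list, invoking @isolate on each.
 * The callback is invoked with nlru->lock held but may drop and reacquire
 * it, which it signals by returning LRU_REMOVED_RETRY or LRU_RETRY; the
 * traversal then restarts from the list head. *nr_to_walk is decremented
 * as items are visited so a caller can spread one scan budget across
 * nodes and memcgs.
 */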
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
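			/* fall through */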
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
						isolate, cb_arg, nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
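
/*
 * Example (an illustrative sketch, not part of this file): a typical
 * isolate callback for the walkers above. "struct foo", its ->lock and
 * foo_is_busy() are hypothetical; the pattern of trylocking the object,
 * skipping or rotating it when busy, and moving it to a private dispose
 * list mirrors what the dentry and inode shrinkers do.
 *
 *	static enum lru_status foo_lru_isolate(struct list_head *item,
 *			struct list_lru_one *lru, spinlock_t *lru_lock,
 *			void *arg)
 *	{
 *		struct list_head *dispose = arg;
 *		struct foo *foo = container_of(item, struct foo, lru);
 *
 *		if (!spin_trylock(&foo->lock))
 *			return LRU_SKIP;
 *
 *		if (foo_is_busy(foo)) {
 *			spin_unlock(&foo->lock);
 *			return LRU_ROTATE;
 *		}
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		spin_unlock(&foo->lock);
 *		return LRU_REMOVED;
 *	}
 *
 * The caller passes a local dispose list as @cb_arg and frees the
 * isolated objects after the walk, outside the lru lock.
 */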

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/*
	 * Only entries in [begin, i) were allocated; the destroy helper
	 * frees [begin, end), so pass i here (i - 1 would leak the last
	 * allocated list).
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	for (i = 0; i < nr_node_ids; i++) {
		if (!memcg_aware)
			lru->node[i].memcg_lrus = NULL;
		else if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_destroy_list_lru_node(&lru->node[i]);
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for (i = 0; i < nr_node_ids; i++) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

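/*
 * Called when memcg_nr_cache_ids grows: resize the per-memcg array of
 * every registered lru to the new size, rolling back the lrus already
 * updated if any resize fails.
 */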
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

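/*
 * Initialise one lru: allocate the per-node array, set up the locks
 * (with an optional lockdep class key) and, for a memcg-aware lru, the
 * per-memcg lists. memcg_get_cache_ids() stabilises memcg_nr_cache_ids
 * for the duration, so the lru cannot miss a concurrent resize between
 * allocation and registration. Normally reached through the
 * list_lru_init*() wrappers in <linux/list_lru.h>.
 */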
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
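
/*
 * Usage sketch (illustrative only; "my_lru" and the object layout are
 * hypothetical). The list_lru_init*() wrappers in <linux/list_lru.h>
 * expand to __list_lru_init() above:
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init(&my_lru);
 *	...
 *	list_lru_add(&my_lru, &obj->lru);	// object became unused
 *	list_lru_del(&my_lru, &obj->lru);	// object is in use again
 *	...
 *	list_lru_destroy(&my_lru);
 *
 * A shrinker would size itself with list_lru_count_one()/_node() and
 * reclaim via the walk functions above.
 */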