/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
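
/*
 * Rough usage sketch (illustrative only; "my_object", "my_isolate" and
 * my_object_still_in_use() are made-up names, not part of this API):
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj = container_of(item, struct my_object, lru);
 *
 *		if (my_object_still_in_use(obj))
 *			return LRU_ROTATE;
 *
 *		list_del_init(item);
 *		return LRU_REMOVED;
 *	}
 *
 * A cache embeds a list_head in each object, calls list_lru_add() when
 * the object becomes unused, list_lru_del() when it is reused again, and
 * reclaims by calling list_lru_walk_node() with a callback like the one
 * above.  On LRU_ROTATE the walker itself moves the item to the list
 * tail; on LRU_REMOVED the callback is expected to have taken the item
 * off the list, as done above.
 */
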
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>

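/*
 * All list_lrus are kept on a global list so that they can be iterated
 * over by the memcg kmem accounting code, hence the CONFIG_MEMCG_KMEM
 * guard.  Without it the register/unregister hooks compile to nothing.
 */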
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

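/*
 * Add @item to the LRU list of the node that the item's backing page
 * belongs to.  Returns true if the item was added (i.e. it was not
 * already on a list), false otherwise.  The per-node lock is taken
 * internally.
 */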
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

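/*
 * Remove @item from the LRU list it is on.  Returns true if the item
 * was present and has been removed, false if it was not on a list.
 */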
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		nlru->nr_items--;
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

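/*
 * Return the number of items currently on the LRU list of node @nid.
 * The count is only a snapshot; it may change as soon as the per-node
 * lock is dropped.
 */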
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

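/*
 * Walk the LRU list of node @nid, calling @isolate on each item until
 * either the list is exhausted or *@nr_to_walk items have been visited.
 * The callback is invoked with the per-node lock held and reports what
 * it did with the item via its enum lru_status return value.  Returns
 * the number of items removed from the list.
 */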
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{

	struct list_lru_node	*nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			nlru->nr_items--;
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

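/*
 * Allocate the per-node array and initialise each node's lock and list.
 * If @key is non-NULL, the per-node locks are placed in that lockdep
 * class.  Returns 0 on success or -ENOMEM.
 */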
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	list_lru_register(lru);
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);

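/*
 * Unregister the lru and free its per-node array.  Calling this on an
 * lru that was never initialised, or destroying it twice, is a no-op.
 */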
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;
	list_lru_unregister(lru);
	kfree(lru->node);
	lru->node = NULL;
}
EXPORT_SYMBOL_GPL(list_lru_destroy);