Commit 3b1d58a4 authored by Dave Chinner, committed by Al Viro

list_lru: per-node list infrastructure

Now that we have an LRU list API, we can start to enhance the
implementation.  This splits the single LRU list into per-node lists and
locks to enhance scalability.  Items are placed on lists according to the
node the memory belongs to.  To make scanning the lists efficient, we also
track whether the per-node lists have entries in them in an active
nodemask.
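
For illustration only (this snippet is not part of the patch), a
hypothetical cache that embeds a list_head in its objects would drive the
API roughly as below; the per-node routing is internal to
list_lru_add/list_lru_del, which derive the node from the memory the
list_head sits in.  The my_object, my_isolate and my_shrink names and the
refcount check are made up for the example:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>

/* Hypothetical object kept on an LRU; only the embedded list_head matters. */
struct my_object {
	struct list_head lru;
	unsigned long refcount;
};

static struct list_lru my_lru;	/* initialised once with list_lru_init() */

/* Walk callback: invoked with the owning node's lock held. */
static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
				  void *cb_arg)
{
	struct my_object *obj = container_of(item, struct my_object, lru);

	if (obj->refcount)
		return LRU_ROTATE;	/* busy: move to the tail of its node list */

	list_del_init(item);		/* unlink here ... */
	return LRU_REMOVED;		/* ... walker then drops that node's count */
}

static unsigned long my_shrink(unsigned long nr_to_scan)
{
	/* Only nodes set in active_nodes are visited, one per-node lock at a time. */
	return list_lru_walk(&my_lru, my_isolate, NULL, nr_to_scan);
}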

Note: We use a fixed-size array for the node LRU, so this struct can be
very big if MAX_NUMNODES is big.  If this becomes a problem this is
fixable by turning this into a pointer and dynamically allocating this to
nr_node_ids.  This quantity is firmware-provided, and still would provide
room for all nodes at the cost of a pointer lookup and an extra
allocation.  Because that allocation will most likely come from a
different slab cache than the main structure holding this structure, we
may very well fail.
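
Purely as a sketch of that alternative (again, not what this patch does),
dynamically sizing the per-node array to nr_node_ids would look roughly
like the following; struct list_lru_dyn and list_lru_dyn_init are
hypothetical names used only for illustration:

#include <linux/slab.h>
#include <linux/nodemask.h>
#include <linux/list_lru.h>

/* Hypothetical variant: per-node array sized to nr_node_ids at runtime. */
struct list_lru_dyn {
	struct list_lru_node	*node;		/* nr_node_ids entries */
	nodemask_t		active_nodes;
};

static int list_lru_dyn_init(struct list_lru_dyn *lru)
{
	int i;

	/* This allocation is the extra failure point the note refers to. */
	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}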

[glommer@openvz.org: fix warnings, added note about node lru]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent f6041567
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -8,6 +8,7 @@
 #define _LRU_LIST_H
 
 #include <linux/list.h>
+#include <linux/nodemask.h>
 
 /* list_lru_walk_cb has to always return one of those */
 enum lru_status {
@@ -18,11 +19,26 @@ enum lru_status {
 				   internally, but has to return locked. */
 };
 
-struct list_lru {
+struct list_lru_node {
 	spinlock_t		lock;
 	struct list_head	list;
 	/* kept as signed so we can catch imbalance bugs */
 	long			nr_items;
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+	/*
+	 * Because we use a fixed-size array, this struct can be very big if
+	 * MAX_NUMNODES is big. If this becomes a problem this is fixable by
+	 * turning this into a pointer and dynamically allocating this to
+	 * nr_node_ids. This quantity is firwmare-provided, and still would
+	 * provide room for all nodes at the cost of a pointer lookup and an
+	 * extra allocation. Because that allocation will most likely come from
+	 * a different slab cache than the main structure holding this
+	 * structure, we may very well fail.
+	 */
+	struct list_lru_node	node[MAX_NUMNODES];
+	nodemask_t		active_nodes;
 };
 
 int list_lru_init(struct list_lru *lru);
@@ -66,10 +82,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
  * guarantee that the list is not updated while the count is being computed.
  * Callers that want such a guarantee need to provide an outer lock.
  */
-static inline unsigned long list_lru_count(struct list_lru *lru)
-{
-	return lru->nr_items;
-}
+unsigned long list_lru_count(struct list_lru *lru);
 
 typedef enum lru_status
 (*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -6,41 +6,73 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/list_lru.h>
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
-	spin_lock(&lru->lock);
+	int nid = page_to_nid(virt_to_page(item));
+	struct list_lru_node *nlru = &lru->node[nid];
+
+	spin_lock(&nlru->lock);
+	WARN_ON_ONCE(nlru->nr_items < 0);
 	if (list_empty(item)) {
-		list_add_tail(item, &lru->list);
-		lru->nr_items++;
-		spin_unlock(&lru->lock);
+		list_add_tail(item, &nlru->list);
+		if (nlru->nr_items++ == 0)
+			node_set(nid, lru->active_nodes);
+		spin_unlock(&nlru->lock);
 		return true;
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&nlru->lock);
 	return false;
 }
 EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_del(struct list_lru *lru, struct list_head *item)
 {
-	spin_lock(&lru->lock);
+	int nid = page_to_nid(virt_to_page(item));
+	struct list_lru_node *nlru = &lru->node[nid];
+
+	spin_lock(&nlru->lock);
 	if (!list_empty(item)) {
 		list_del_init(item);
-		lru->nr_items--;
-		spin_unlock(&lru->lock);
+		if (--nlru->nr_items == 0)
+			node_clear(nid, lru->active_nodes);
+		WARN_ON_ONCE(nlru->nr_items < 0);
+		spin_unlock(&nlru->lock);
 		return true;
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&nlru->lock);
 	return false;
 }
 EXPORT_SYMBOL_GPL(list_lru_del);
 
-unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
-			    void *cb_arg, unsigned long nr_to_walk)
+unsigned long list_lru_count(struct list_lru *lru)
 {
+	unsigned long count = 0;
+	int nid;
+
+	for_each_node_mask(nid, lru->active_nodes) {
+		struct list_lru_node *nlru = &lru->node[nid];
+
+		spin_lock(&nlru->lock);
+		WARN_ON_ONCE(nlru->nr_items < 0);
+		count += nlru->nr_items;
+		spin_unlock(&nlru->lock);
+	}
+	return count;
+}
+EXPORT_SYMBOL_GPL(list_lru_count);
+
+static unsigned long
+list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
+		   void *cb_arg, unsigned long *nr_to_walk)
+{
+	struct list_lru_node	*nlru = &lru->node[nid];
 	struct list_head *item, *n;
-	unsigned long removed = 0;
+	unsigned long isolated = 0;
+
 	/*
 	 * If we don't keep state of at which pass we are, we can loop at
 	 * LRU_RETRY, since we have no guarantees that the caller will be able
@@ -50,18 +82,20 @@ unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 	 */
 	bool first_pass = true;
 
-	spin_lock(&lru->lock);
+	spin_lock(&nlru->lock);
 restart:
-	list_for_each_safe(item, n, &lru->list) {
+	list_for_each_safe(item, n, &nlru->list) {
 		enum lru_status ret;
-		ret = isolate(item, &lru->lock, cb_arg);
+		ret = isolate(item, &nlru->lock, cb_arg);
 		switch (ret) {
 		case LRU_REMOVED:
-			lru->nr_items--;
-			removed++;
+			if (--nlru->nr_items == 0)
+				node_clear(nid, lru->active_nodes);
+			WARN_ON_ONCE(nlru->nr_items < 0);
+			isolated++;
 			break;
 		case LRU_ROTATE:
-			list_move_tail(item, &lru->list);
+			list_move_tail(item, &nlru->list);
 			break;
 		case LRU_SKIP:
 			break;
@@ -76,42 +110,84 @@ unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 			BUG();
 		}
 
-		if (nr_to_walk-- == 0)
+		if ((*nr_to_walk)-- == 0)
 			break;
 
 	}
-	spin_unlock(&lru->lock);
-	return removed;
+
+	spin_unlock(&nlru->lock);
+	return isolated;
+}
+EXPORT_SYMBOL_GPL(list_lru_walk_node);
+
+unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+			    void *cb_arg, unsigned long nr_to_walk)
+{
+	unsigned long isolated = 0;
+	int nid;
+
+	for_each_node_mask(nid, lru->active_nodes) {
+		isolated += list_lru_walk_node(lru, nid, isolate,
+					       cb_arg, &nr_to_walk);
+		if (nr_to_walk <= 0)
+			break;
+	}
+	return isolated;
 }
 EXPORT_SYMBOL_GPL(list_lru_walk);
 
-unsigned long list_lru_dispose_all(struct list_lru *lru,
-				   list_lru_dispose_cb dispose)
+static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
+					       list_lru_dispose_cb dispose)
 {
-	unsigned long disposed = 0;
+	struct list_lru_node *nlru = &lru->node[nid];
 	LIST_HEAD(dispose_list);
+	unsigned long disposed = 0;
 
-	spin_lock(&lru->lock);
-	while (!list_empty(&lru->list)) {
-		list_splice_init(&lru->list, &dispose_list);
-		disposed += lru->nr_items;
-		lru->nr_items = 0;
-		spin_unlock(&lru->lock);
+	spin_lock(&nlru->lock);
+	while (!list_empty(&nlru->list)) {
+		list_splice_init(&nlru->list, &dispose_list);
+		disposed += nlru->nr_items;
+		nlru->nr_items = 0;
+		node_clear(nid, lru->active_nodes);
+		spin_unlock(&nlru->lock);
 
 		dispose(&dispose_list);
 
-		spin_lock(&lru->lock);
+		spin_lock(&nlru->lock);
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&nlru->lock);
 	return disposed;
 }
 
+unsigned long list_lru_dispose_all(struct list_lru *lru,
+				   list_lru_dispose_cb dispose)
+{
+	unsigned long disposed;
+	unsigned long total = 0;
+	int nid;
+
+	do {
+		disposed = 0;
+		for_each_node_mask(nid, lru->active_nodes) {
+			disposed += list_lru_dispose_all_node(lru, nid,
+							      dispose);
+		}
+		total += disposed;
+	} while (disposed != 0);
+	return total;
+}
+
 int list_lru_init(struct list_lru *lru)
 {
-	spin_lock_init(&lru->lock);
-	INIT_LIST_HEAD(&lru->list);
-	lru->nr_items = 0;
+	int i;
 
+	nodes_clear(lru->active_nodes);
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		spin_lock_init(&lru->node[i].lock);
+		INIT_LIST_HEAD(&lru->node[i].list);
+		lru->node[i].nr_items = 0;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init);