Commit 6adef3eb authored by Jack Steiner, committed by Linus Torvalds

cpusets: new round-robin rotor for SLAB allocations

We have observed several workloads running on multi-node systems where
memory is assigned unevenly across the nodes.  There are numerous
reasons for this, but one is the round-robin rotor in
cpuset_mem_spread_node().

For example, a simple test that writes a multi-page file will allocate
pages on nodes 0 2 4 6 ...  Odd nodes are skipped.  (Sometimes it
allocates on odd nodes & skips even nodes).

The program "lfile" demonstrates this.  It writes a file consisting of
10 pages, then mmaps the file & uses get_mempolicy(..., MPOL_F_NODE)
to determine the nodes where the file pages were allocated.  Its output
is shown below, followed by a sketch of such a test program:

	# ./lfile
	 allocated on nodes: 2 4 6 0 1 2 6 0 2
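
A minimal sketch of such a test follows.  It is a hypothetical
reconstruction of "lfile", not the original source; the file path and
the MPOL_F_ADDR flag (needed to query a specific address) are
assumptions.  Build with -lnuma:

	/* Hypothetical reconstruction of the "lfile" test, for illustration. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <numaif.h>	/* get_mempolicy() */

	#define NR_PAGES 10

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		char *buf = malloc(psz);
		char *map;
		int fd, i;

		fd = open("/tmp/lfile.dat", O_CREAT | O_RDWR | O_TRUNC, 0600);
		if (fd < 0 || !buf) {
			perror("setup");
			return 1;
		}
		memset(buf, 'x', psz);
		for (i = 0; i < NR_PAGES; i++)	/* write a multi-page file */
			if (write(fd, buf, psz) != psz)
				return 1;

		map = mmap(NULL, NR_PAGES * psz, PROT_READ, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		printf(" allocated on nodes:");
		for (i = 0; i < NR_PAGES; i++) {
			int node = -1;

			/* MPOL_F_NODE | MPOL_F_ADDR yields the node backing addr */
			if (get_mempolicy(&node, NULL, 0, map + i * psz,
					  MPOL_F_NODE | MPOL_F_ADDR) == 0)
				printf(" %d", node);
		}
		printf("\n");
		return 0;
	}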

There is a single rotor that is used for allocating both file pages &
slab pages.  Writing the file allocates both a data page & a slab page
(buffer_head), so the round-robin rotor advances two nodes for each
file page written, as the sketch below illustrates.
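
To make the arithmetic concrete, here is a minimal user-space sketch
(not kernel code; the node count NR_NODES is an assumption, and the
modulo rotor stands in for next_node()/first_node() over mems_allowed):

	#include <stdio.h>

	#define NR_NODES 8	/* assumed node count, for illustration */

	static int rotor;	/* shared by data-page and slab allocations */

	static int spread_node(void)
	{
		rotor = (rotor + 1) % NR_NODES;	/* simplified next_node() */
		return rotor;
	}

	int main(void)
	{
		int i;

		printf("data pages allocated on nodes:");
		for (i = 0; i < 10; i++) {
			int data_node = spread_node();	/* file (data) page */

			spread_node();		/* buffer_head slab page */
			printf(" %d", data_node);
		}
		printf("\n");	/* 1 3 5 7 1 3 5 7 1 3 -- every other node */
		return 0;
	}

With an even number of nodes, the data pages land on every other node,
matching the skipped-node pattern described above.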

A quick test confirms that this is the cause of the uneven
allocation:

	# echo 0 >/dev/cpuset/memory_spread_slab
	# ./lfile
	 allocated on nodes: 6 7 8 9 0 1 2 3 4 5

This patch introduces a second rotor that is used for slab allocations.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Paul Menage <menage@google.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 2c488db2

--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
 extern int cpuset_mem_spread_node(void);
+extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
@@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void)
 	return 0;
 }
 
+static inline int cpuset_slab_spread_node(void)
+{
+	return 0;
+}
+
 static inline int cpuset_do_page_mem_spread(void)
 {
 	return 0;

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1423,6 +1423,7 @@ struct task_struct {
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
 	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
+	int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
 	/* Control Group info protected by css_set_lock */

--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2469,7 +2469,8 @@ void cpuset_unlock(void)
 }
 
 /**
- * cpuset_mem_spread_node() - On which node to begin search for a page
+ * cpuset_mem_spread_node() - On which node to begin search for a file page
+ * cpuset_slab_spread_node() - On which node to begin search for a slab page
  *
  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
  * tasks in a cpuset with is_spread_page or is_spread_slab set),
@@ -2494,16 +2495,27 @@ void cpuset_unlock(void)
  * See kmem_cache_alloc_node().
  */
 
-int cpuset_mem_spread_node(void)
+static int cpuset_spread_node(int *rotor)
 {
 	int node;
 
-	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
+	node = next_node(*rotor, current->mems_allowed);
 	if (node == MAX_NUMNODES)
 		node = first_node(current->mems_allowed);
-	current->cpuset_mem_spread_rotor = node;
+	*rotor = node;
 	return node;
 }
+
+int cpuset_mem_spread_node(void)
+{
+	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
+}
+
+int cpuset_slab_spread_node(void)
+{
+	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
+}
+
 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
 
 /**

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3219,7 +3219,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	nid_alloc = nid_here = numa_node_id();
 	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
-		nid_alloc = cpuset_mem_spread_node();
+		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	put_mems_allowed();
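
For comparison, the earlier sketch rewritten with two independent
rotors (mirroring the patched cpuset_spread_node(int *rotor) helper;
NR_NODES is again an assumed value) no longer skips nodes for the data
pages:

	#include <stdio.h>

	#define NR_NODES 8	/* assumed node count, for illustration */

	static int spread_node(int *rotor)
	{
		*rotor = (*rotor + 1) % NR_NODES;
		return *rotor;
	}

	int main(void)
	{
		int mem_rotor = 0, slab_rotor = 0, i;

		printf("data pages allocated on nodes:");
		for (i = 0; i < 10; i++) {
			int data_node = spread_node(&mem_rotor);	/* file page */

			spread_node(&slab_rotor);	/* buffer_head slab page */
			printf(" %d", data_node);
		}
		printf("\n");	/* 1 2 3 4 5 6 7 0 1 2 -- consecutive nodes */
		return 0;
	}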