diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2b28c849d75acf5dcbd7085c76256ec1054372a2..e2ee5b268797884427a4f81aff347980136de3e0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -46,6 +46,7 @@ typedef struct kmem_cache kmem_cache_t;
 					   what is reclaimable later*/
 #define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
+#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 
 /* flags passed to a constructor func */
 #define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
diff --git a/mm/slab.c b/mm/slab.c
index 1c8f5ee230d5dfeaf59fdcf5a1b6334cfa5f3691..de516658d3d868ec0b1ca40d1314b9420f0ceba2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -94,6 +94,7 @@
 #include	<linux/interrupt.h>
 #include	<linux/init.h>
 #include	<linux/compiler.h>
+#include	<linux/cpuset.h>
 #include	<linux/seq_file.h>
 #include	<linux/notifier.h>
 #include	<linux/kallsyms.h>
@@ -173,12 +174,12 @@
 			 SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
 
 /*
@@ -2810,6 +2811,14 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (unlikely(current->mempolicy && !in_interrupt())) {
 		int nid = slab_node(current->mempolicy);
 
+		if (nid != numa_node_id())
+			return __cache_alloc_node(cachep, flags, nid);
+	}
+	if (unlikely(cpuset_do_slab_mem_spread() &&
+			(cachep->flags & SLAB_MEM_SPREAD) &&
+			!in_interrupt())) {
+		int nid = cpuset_mem_spread_node();
+
 		if (nid != numa_node_id())
 			return __cache_alloc_node(cachep, flags, nid);
 	}