diff --git a/mm/slab_common.c b/mm/slab_common.c
index abea414ede86c975c7d31c433a6b1ff861c2d874..734426035256b95e6c3a93ba8d76f0e14bb75265 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -445,7 +445,6 @@ kmem_cache_create_usercopy(const char *name,
 	int err;
 
 	get_online_cpus();
-	get_online_mems();
 	memcg_get_cache_ids();
 
 	mutex_lock(&slab_mutex);
@@ -497,7 +496,6 @@ kmem_cache_create_usercopy(const char *name,
 	mutex_unlock(&slab_mutex);
 
 	memcg_put_cache_ids();
-	put_online_mems();
 	put_online_cpus();
 
 	if (err) {
@@ -888,7 +886,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		return;
 
 	get_online_cpus();
-	get_online_mems();
 
 	mutex_lock(&slab_mutex);
 
@@ -901,13 +898,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
 	mutex_unlock(&slab_mutex);
 
-	put_online_mems();
 	put_online_cpus();
 
 	flush_memcg_workqueue(s);
 
 	get_online_cpus();
-	get_online_mems();
 
 	mutex_lock(&slab_mutex);
 
@@ -934,7 +929,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 out_unlock:
 	mutex_unlock(&slab_mutex);
 
-	put_online_mems();
 	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -951,10 +945,8 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	int ret;
 
 	get_online_cpus();
-	get_online_mems();
 	kasan_cache_shrink(cachep);
 	ret = __kmem_cache_shrink(cachep);
-	put_online_mems();
 	put_online_cpus();
 	return ret;
 }
diff --git a/mm/slub.c b/mm/slub.c
index ce8fa0d0c1e82d1f36a5c2190d77eda747271fb5..04aa35f650877f248ddbaa009c36230a9878fcf9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -236,6 +236,14 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
 #endif
 }
 
+/*
+ * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
+ * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
+ * differ during memory hotplug/hotremove operations.
+ * Protected by slab_mutex.
+ */
+static nodemask_t slab_nodes;
+
 /********************************************************************
  *			Core slab cache functions
  *******************************************************************/
@@ -2534,7 +2542,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		 * ignore the node constraint
 		 */
 		if (unlikely(node != NUMA_NO_NODE &&
-			     !node_state(node, N_NORMAL_MEMORY)))
+			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
 		goto new_slab;
 	}
@@ -2545,7 +2553,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		 * same as above but node_match() being false already
 		 * implies node != NUMA_NO_NODE
 		 */
-		if (!node_state(node, N_NORMAL_MEMORY)) {
+		if (!node_isset(node, slab_nodes)) {
 			node = NUMA_NO_NODE;
 			goto redo;
 		} else {
@@ -3413,7 +3421,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
 
-	for_each_node_state(node, N_NORMAL_MEMORY) {
+	for_each_node_mask(node, slab_nodes) {
 		struct kmem_cache_node *n;
 
 		if (slab_state == DOWN) {
@@ -4087,6 +4095,7 @@ static void slab_mem_offline_callback(void *arg)
 		return;
 
 	mutex_lock(&slab_mutex);
+	node_clear(offline_node, slab_nodes);
 	/*
 	 * We no longer free kmem_cache_node structures here, as it would be
 	 * racy with all get_node() users, and infeasible to protect them with
@@ -4136,6 +4145,11 @@ static int slab_mem_going_online_callback(void *arg)
 		init_kmem_cache_node(n);
 		s->node[nid] = n;
 	}
+	/*
+	 * Any cache created after this point will also have kmem_cache_node
+	 * initialized for the new node.
+	 */
+	node_set(nid, slab_nodes);
 out:
 	mutex_unlock(&slab_mutex);
 	return ret;
@@ -4218,6 +4232,7 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
+	int node;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -4225,6 +4240,13 @@ void __init kmem_cache_init(void)
 	kmem_cache_node = &boot_kmem_cache_node;
 	kmem_cache = &boot_kmem_cache;
 
+	/*
+	 * Initialize the nodemask for which we will allocate per node
+	 * structures. Here we don't need to take slab_mutex yet.
+	 */
+	for_each_node_state(node, N_NORMAL_MEMORY)
+		node_set(node, slab_nodes);
+
 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
 		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
 
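
For readers less familiar with the scheme above, the following standalone userspace
sketch (not kernel code; compile with `cc -pthread`) mirrors the idea behind
slab_nodes: hotplug callbacks update a node bitmask under a mutex, while the
allocation path only reads the bitmask and falls back to "any node" instead of
taking a memory hotplug lock. All identifiers here (ready_nodes, node_cache,
node_online_cb, node_offline_cb, alloc_from_node) are invented for the example
and do not exist in the kernel.

/*
 * Minimal userspace analogue of the slab_nodes scheme.
 * cache_mutex stands in for slab_mutex, ready_nodes for slab_nodes,
 * node_cache[] for the per-node kmem_cache_node pointers.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 8

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ready_nodes;          /* bit set => per-node structure is usable */
static int *node_cache[MAX_NODES];         /* per-node structure, kept even when offline */

/* Hot-add path: allocate the per-node structure first, then publish the node. */
static void node_online_cb(int nid)
{
	pthread_mutex_lock(&cache_mutex);
	if (!node_cache[nid])
		node_cache[nid] = calloc(1, sizeof(int));
	if (node_cache[nid])
		ready_nodes |= 1UL << nid;     /* like node_set(nid, slab_nodes) */
	pthread_mutex_unlock(&cache_mutex);
}

/* Hot-remove path: unpublish the node; the structure itself is not freed. */
static void node_offline_cb(int nid)
{
	pthread_mutex_lock(&cache_mutex);
	ready_nodes &= ~(1UL << nid);          /* like node_clear(offline_node, slab_nodes) */
	pthread_mutex_unlock(&cache_mutex);
}

/*
 * Allocation path: no hotplug lock is taken. If the requested node is not
 * marked ready, fall back to "any node", mirroring the NUMA_NO_NODE fallback
 * in ___slab_alloc(). A momentarily stale view of the mask is harmless here:
 * the worst case is an unnecessary fallback, never a dereference of a
 * missing per-node structure.
 */
static int alloc_from_node(int nid)
{
	if (nid >= 0 && !(ready_nodes & (1UL << nid)))
		nid = -1;                      /* like node = NUMA_NO_NODE */
	return nid;
}

int main(void)
{
	node_online_cb(1);
	printf("request node 1 -> served from node %d\n", alloc_from_node(1));
	node_offline_cb(1);
	printf("request node 1 -> served from node %d\n", alloc_from_node(1));
	return 0;
}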