diff --git a/MAINTAINERS b/MAINTAINERS
index 89e4d4b145bb7e73b4c45671a84b401a5d8694c1..1af022e63668fd0c024292ba82515b39a89d83b0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3684,7 +3684,7 @@ F: kernel/debug/
 
 KMEMCHECK
 M: Vegard Nossum <vegardno@ifi.uio.no>
-M: Pekka Enberg <penberg@cs.helsinki.fi>
+M: Pekka Enberg <penberg@kernel.org>
 S: Maintained
 F: Documentation/kmemcheck.txt
 F: arch/x86/include/asm/kmemcheck.h
@@ -5646,7 +5646,7 @@ F: drivers/net/sky2.*
 
 SLAB ALLOCATOR
 M: Christoph Lameter <cl@linux-foundation.org>
-M: Pekka Enberg <penberg@cs.helsinki.fi>
+M: Pekka Enberg <penberg@kernel.org>
 M: Matt Mackall <mpm@selenic.com>
 L: linux-mm@kvack.org
 S: Maintained
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 24376fe7ee68b5cc71cca000cfdc260527de207b..8122018d300029167518eb4a9c93e395e3efd304 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -165,6 +165,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
 extern void put_page_bootmem(struct page *page);
 #endif
 
+/*
+ * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
+ * notifier will be called under this. 2) offline/online/add/remove memory
+ * will not run simultaneously.
+ */
+
 void lock_memory_hotplug(void);
 void unlock_memory_hotplug(void);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e92f04749fcb23fa283642dbb2a00dcda1ccd38d..321fc7455df7328c08441b54300054ff963e487b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -409,6 +409,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	int ret;
 	struct memory_notify arg;
 
+	lock_memory_hotplug();
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
 	arg.status_change_nid = -1;
@@ -421,6 +422,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 	/*
@@ -445,6 +447,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 			nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 
@@ -469,6 +472,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 
 	if (onlined_pages)
 		memory_notify(MEM_ONLINE, &arg);
+	unlock_memory_hotplug();
 
 	return 0;
 }
diff --git a/mm/slab.c b/mm/slab.c
index 264037449f080a9247d247d9a18e401a25ab932a..37961d1f584fed737f52a9981673bc7be9011efe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -284,7 +284,7 @@ struct kmem_list3 {
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
 #define SIZE_AC MAX_NUMNODES
 #define SIZE_L3 (2 * MAX_NUMNODES)
@@ -4053,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
 */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -4317,7 +4317,7 @@ static const struct seq_operations slabinfo_op = {
 * @count: data length
 * @ppos: unused
 */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
diff --git a/mm/slub.c b/mm/slub.c
index c7ef0070dd864efd8bd2261c428228cbffbabe44..e15aa7f193c9734518f3c3508210c70f294e4ad3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3797,7 +3797,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	down_read(&slub_lock);
+	lock_memory_hotplug();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -3838,7 +3838,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		x += sprintf(buf + x, " N%d=%lu",
 			node, nodes[node]);
 #endif
-	up_read(&slub_lock);
+	unlock_memory_hotplug();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
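
Reviewer note (annotation, not part of the patch): the new header comment documents two
guarantees, but the diff itself does not show how the lock is implemented. A minimal
sketch, assuming a single global mutex backs the two entry points in mm/memory_hotplug.c
(the in-tree implementation may differ, e.g. by also serializing against hibernation):

/* Sketch only: one mutex is enough to provide both documented guarantees,
 * since every hotplug path and every notifier invocation funnels through
 * these two entry points. */
#include <linux/mutex.h>

static DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);
}

void unlock_memory_hotplug(void)
{
	mutex_unlock(&mem_hotplug_mutex);
}

This is also what makes the mm/slub.c hunks safe: show_slab_objects() only needs the set
of online nodes to stay stable while it walks per-node statistics, so taking the memory
hotplug lock instead of slub_lock is sufficient for that purpose and avoids the extra
lock dependency on slub_lock.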
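
Guarantee 1) in the header comment is what memory-hotplug notifier callbacks get to rely
on. A hedged illustration of a callback written against that guarantee follows;
example_mem_notify and example_mem_nb are hypothetical names invented for this sketch,
not part of the patch:

#include <linux/memory.h>
#include <linux/notifier.h>

/* Hypothetical callback: because online_pages() now wraps the whole notifier
 * sequence in lock_memory_hotplug()/unlock_memory_hotplug(), this runs with
 * no concurrent online/offline/add/remove operation in flight. */
static int example_mem_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* Prepare state for arg->nr_pages pages at arg->start_pfn.
		 * If this returned an error, online_pages() would emit
		 * MEM_CANCEL_ONLINE and, with this patch, unlock before
		 * returning, so the lock cannot leak on the error path. */
		pr_debug("going online: %lu pages at pfn %#lx\n",
			 arg->nr_pages, arg->start_pfn);
		break;
	case MEM_ONLINE:
		/* Commit the state prepared above. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_mem_nb = {
	.notifier_call = example_mem_notify,
};

/* Registration, e.g. from an __init function:
 *	register_memory_notifier(&example_mem_nb);
 */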