diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 619d843ba231492dfb7100d32734ea711c391e7c..dcb0ad098c601d00b339fe4685d7452de96c25ef 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -754,6 +754,7 @@ config NUMA
 	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
+	select SPARSEMEM_STATIC
 
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b97054bbc39410d82f90df82fe10b11304908e05..79cf578e21b974cf3e5e570f67f6e2bc9bce0126 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -487,39 +487,29 @@ struct mem_section {
 	unsigned long section_mem_map;
 };
 
-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
-/*
- * Should we ever require GCC 4 or later then the flat array scheme
- * can be eliminated and a uniform solution for EXTREME and !EXTREME can
- * be arrived at.
- */
-#define SECTION_ROOT_SHIFT	(PAGE_SHIFT-3)
-#define SECTION_ROOT_MASK	((1UL<<SECTION_ROOT_SHIFT) - 1)
-#define SECTION_TO_ROOT(_sec)	((_sec) >> SECTION_ROOT_SHIFT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS >> SECTION_ROOT_SHIFT)
+#ifdef CONFIG_SPARSEMEM_EXTREME
+#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
+#else
+#define SECTIONS_PER_ROOT	1
+#endif
 
-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
-
-static inline struct mem_section *__nr_to_section(unsigned long nr)
-{
-	if (!mem_section[SECTION_TO_ROOT(nr)])
-		return NULL;
-	return &mem_section[SECTION_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
-}
+#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
+#ifdef CONFIG_SPARSEMEM_EXTREME
+extern struct mem_section *mem_section[NR_SECTION_ROOTS];
 #else
-
-extern struct mem_section mem_section[NR_MEM_SECTIONS];
+extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+#endif
 
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
-	return &mem_section[nr];
+	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+		return NULL;
+	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
 
-#define sparse_index_init(_sec, _nid) do {} while (0)
-
-#endif
-
 /*
  * We use the lower bits of the mem_map pointer to store
  * a little bit of information. There should be at least
diff --git a/mm/Kconfig b/mm/Kconfig
index fc644c5c065d2b5a2b05864b23b8e78385f6753b..4e9937ac3529ae391762f5071e938f8ac5497077 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -90,11 +90,24 @@ config HAVE_MEMORY_PRESENT
 	def_bool y
 	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
 
+#
+# SPARSEMEM_EXTREME (which is the default) does some bootmem
+# allocations when memory_present() is called.  If this can not
+# be done on your architecture, select this option.  However,
+# statically allocating the mem_section[] array can potentially
+# consume vast quantities of .bss, so be careful.
+#
+# This option will also potentially produce smaller runtime code
+# with gcc 3.4 and later.
+#
+config SPARSEMEM_STATIC
+	def_bool n
+
 #
 # Architectecture platforms which require a two level mem_section in SPARSEMEM
 # must select this option. This is usually for architecture platforms with
 # an extremely sparse physical address space.
 #
-config ARCH_SPARSEMEM_EXTREME
-	def_bool n
-	depends on SPARSEMEM && 64BIT
+config SPARSEMEM_EXTREME
+	def_bool y
+	depends on SPARSEMEM && !SPARSEMEM_STATIC
diff --git a/mm/sparse.c b/mm/sparse.c
index b2b456bf0a5d03cb10ceb3c3cd63443040a47137..fa01292157a9aca0b66c461d9074e998ef43dcb5 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -13,28 +13,36 @@
  *
  * 1) mem_section	- memory sections, mem_map's for valid memory
  */
-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
+#ifdef CONFIG_SPARSEMEM_EXTREME
 struct mem_section *mem_section[NR_SECTION_ROOTS]
 	____cacheline_maxaligned_in_smp;
+#else
+struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
+	____cacheline_maxaligned_in_smp;
+#endif
+EXPORT_SYMBOL(mem_section);
+
+static void sparse_alloc_root(unsigned long root, int nid)
+{
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+#endif
+}
 
 static void sparse_index_init(unsigned long section, int nid)
 {
-	unsigned long root = SECTION_TO_ROOT(section);
+	unsigned long root = SECTION_NR_TO_ROOT(section);
 
 	if (mem_section[root])
 		return;
-	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+
+	sparse_alloc_root(root, nid);
+
 	if (mem_section[root])
 		memset(mem_section[root], 0, PAGE_SIZE);
 	else
 		panic("memory_present: NO MEMORY\n");
 }
-#else
-struct mem_section mem_section[NR_MEM_SECTIONS]
-	____cacheline_maxaligned_in_smp;
-#endif
-EXPORT_SYMBOL(mem_section);
-
 /* Record a memory area against a node. */
 void memory_present(int nid, unsigned long start, unsigned long end)
 {
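
For illustration only (not part of the patch): the sketch below models the new
two-level mem_section lookup in plain user-space C. The values PAGE_SIZE = 4096
and NR_MEM_SECTIONS = 1024 are assumptions picked for the example, and malloc()
stands in for alloc_bootmem_node(); only the arithmetic (SECTIONS_PER_ROOT,
SECTION_NR_TO_ROOT, SECTION_ROOT_MASK) mirrors the patch above.

/*
 * User-space illustration of the SPARSEMEM_EXTREME two-level lookup.
 * Assumptions (not kernel code): PAGE_SIZE = 4096, a section descriptor
 * is one unsigned long, and malloc() replaces the bootmem allocator.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mem_section {
	unsigned long section_mem_map;
};

#define PAGE_SIZE		4096UL
#define NR_MEM_SECTIONS		1024UL	/* illustrative value only */

#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

/* First level: one pointer per root; each root is a page-sized array. */
static struct mem_section *mem_section[NR_SECTION_ROOTS];

/* Analogue of sparse_index_init(): allocate a root lazily, zero it. */
static void sparse_index_init(unsigned long section)
{
	unsigned long root = SECTION_NR_TO_ROOT(section);

	if (mem_section[root])
		return;
	mem_section[root] = malloc(PAGE_SIZE);	/* kernel: alloc_bootmem_node() */
	if (!mem_section[root]) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	memset(mem_section[root], 0, PAGE_SIZE);
}

/* Analogue of __nr_to_section(): pick the root, then index within it. */
static struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}

int main(void)
{
	unsigned long nr = 700;		/* arbitrary section number */

	printf("sections per root: %lu, roots: %lu\n",
	       SECTIONS_PER_ROOT, NR_SECTION_ROOTS);
	printf("before init: section %lu -> %p\n", nr, (void *)__nr_to_section(nr));
	sparse_index_init(nr);
	printf("after init:  section %lu -> %p (root %lu, offset %lu)\n",
	       nr, (void *)__nr_to_section(nr),
	       SECTION_NR_TO_ROOT(nr), nr & SECTION_ROOT_MASK);
	return 0;
}

With the assumed constants this prints 512 sections per root and 2 roots;
section 700 resolves to root 1, offset 188, and returns NULL until its root
has been initialized, which is exactly the check the patched
__nr_to_section() performs.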