diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 88420af98140dd0cbc1357f698fa66e6616a1c0f..0a82b889d76ea236f521477b680e2d79b01b253b 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -47,6 +47,18 @@ config DEBUG_PAGEALLOC
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.
 
+config DEBUG_PER_CPU_MAPS
+	bool "Debug access to per_cpu maps"
+	depends on DEBUG_KERNEL
+	depends on X86_64_SMP
+	default n
+	help
+	  Say Y to verify that the per_cpu map being accessed has
+	  been setup. Adds a fair amount of code to kernel memory
+	  and decreases performance.
+
+	  Say N if unsure.
+
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5d24dc1ec2375726cc80d66774c27e72da55cdc5..e157cb274b25d6dcd4e6a47b87be95619af9366c 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -37,6 +37,9 @@ u16 x86_cpu_to_node_map_init[NR_CPUS] = {
 void *x86_cpu_to_node_map_early_ptr;
 DEFINE_PER_CPU(u16, x86_cpu_to_node_map) = NUMA_NO_NODE;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
+#endif
 
 u16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 040374f030cf85518e338566787477eeafc34c41..f1e12329078ee67e88a3b2ee3dfa7a96840e2e02 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -66,6 +66,13 @@ static inline int early_cpu_to_node(int cpu)
 
 static inline int cpu_to_node(int cpu)
 {
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (x86_cpu_to_node_map_early_ptr) {
+		printk(KERN_NOTICE
+			"cpu_to_node(%d): usage too early!\n", cpu);
+		BUG();
+	}
+#endif
 	if(per_cpu_offset(cpu))
 		return per_cpu(x86_cpu_to_node_map, cpu);
 	else