From 056e342dce727c897b5e9a7315e7733aa56a22b8 Mon Sep 17 00:00:00 2001
From: Hongchen Zhang
Date: Sun, 9 Jul 2023 18:51:11 +0800
Subject: [PATCH] LoongArch: fix percpu area not allocated as expected

LoongArch inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7PT2H

--------------------------------

The percpu area is not allocated as expected because some required
CONFIG options are not defined. Define them to fix it.

Signed-off-by: Hongchen Zhang
---
 arch/loongarch/Kconfig       | 12 +++++++
 arch/loongarch/kernel/numa.c | 61 ++++++++++++++++++++++++++++++++++--
 2 files changed, 71 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 7c2c552ac7ed..d88e64b7d85b 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -131,6 +131,18 @@ config LOONGARCH
 	select USER_STACKTRACE_SUPPORT
 	select ZONE_DMA32
 
+config HAVE_SETUP_PER_CPU_AREA
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_PAGE_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
 config 32BIT
 	bool
 
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 1098fb1fc0cc..35c610277ad0 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -52,6 +52,60 @@ nodemask_t numa_nodes_parsed __initdata;
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = early_cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = memblock_alloc_from(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = memblock_alloc_try_nid(size, align, goal,
+					     MEMBLOCK_ALLOC_ACCESSIBLE,
+					     node);
+
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
+			 cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return memblock_alloc_from(size, align, goal);
+#endif
+}
+
+/*
+ * Helpers for first chunk memory allocation
+ */
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+{
+	return pcpu_alloc_bootmem(cpu, size, align);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free(__pa(ptr), size);
+}
+
 static int __init pcpu_cpu_to_node(int cpu)
 {
 	return early_cpu_to_node(cpu);
@@ -122,13 +176,16 @@ void __init setup_per_cpu_areas(void)
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
 		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 					    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
-					    pcpu_cpu_distance, pcpu_cpu_to_node);
+					    pcpu_cpu_distance,
+					    pcpu_fc_alloc, pcpu_fc_free);
 		if (rc < 0)
 			pr_warn("%s allocator failed (%d), falling back to page size\n",
 				pcpu_fc_names[pcpu_chosen_fc], rc);
 	}
 	if (rc < 0)
-		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
+		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+					   pcpu_fc_alloc, pcpu_fc_free,
+					   pcpu_populate_pte);
 	if (rc < 0)
 		panic("cannot initialize percpu area (err=%d)", rc);

-- 
GitLab
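
Background for reviewers: why the Kconfig symbols matter. On SMP kernels of
this generation, when an architecture does not select
CONFIG_HAVE_SETUP_PER_CPU_AREA, the core kernel provides a generic
setup_per_cpu_areas() in mm/percpu.c that builds the first per-cpu chunk with
page-granular, node-agnostic allocations, so the NUMA-aware override in
arch/loongarch/kernel/numa.c never runs and per-cpu data is not placed on each
CPU's home node. The fragment below is a simplified, illustrative sketch of
that generic fallback (assuming the 5.10-era first-chunk API used by this
tree); it is not part of this patch and not the exact upstream source.

/*
 * Simplified sketch of the generic fallback in mm/percpu.c, compiled only
 * when CONFIG_SMP is set and CONFIG_HAVE_SETUP_PER_CPU_AREA is not.
 * Shown for illustration; not part of this patch.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * PAGE_SIZE atom size, no cpu_distance_fn and a node-agnostic
	 * bootmem allocator: every CPU's per-cpu data lands in one flat
	 * chunk, with no node-local placement and no PMD-sized mapping.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

Selecting HAVE_SETUP_PER_CPU_AREA makes the LoongArch setup_per_cpu_areas()
take over instead, while NEED_PER_CPU_EMBED_FIRST_CHUNK and
NEED_PER_CPU_PAGE_FIRST_CHUNK keep pcpu_embed_first_chunk() and
pcpu_page_first_chunk() built in mm/percpu.c so they can be fed the per-node
pcpu_fc_alloc()/pcpu_fc_free() helpers added by this patch.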