diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 7c2c552ac7ed52fb2e7e5028b9ffebc6caac702a..0b608275b4bbfb69afae50a86b42050abd607b29 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -131,12 +131,30 @@ config LOONGARCH
 	select USER_STACKTRACE_SUPPORT
 	select ZONE_DMA32
 
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
+config HAVE_SETUP_PER_CPU_AREA
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_PAGE_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
 config 32BIT
 	bool
 
 config 64BIT
 	def_bool y
 
+config ZONE_DMA32
+	bool
+
 config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 1098fb1fc0ccfd4adcfb07fd2ffb9c4a8d89cfdc..35c610277ad0d3b7e93d93c2dce92c849f7cc5ec 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -52,6 +52,60 @@ nodemask_t numa_nodes_parsed __initdata;
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = early_cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = memblock_alloc_from(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = memblock_alloc_try_nid(size, align, goal,
+					     MEMBLOCK_ALLOC_ACCESSIBLE,
+					     node);
+
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
+			 cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return memblock_alloc_from(size, align, goal);
+#endif
+}
+
+/*
+ * Helpers for first chunk memory allocation
+ */
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+{
+	return pcpu_alloc_bootmem(cpu, size, align);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free(__pa(ptr), size);
+}
+
 static int __init pcpu_cpu_to_node(int cpu)
 {
 	return early_cpu_to_node(cpu);
 }
@@ -122,13 +176,16 @@ void __init setup_per_cpu_areas(void)
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
 		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 					    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
-					    pcpu_cpu_distance, pcpu_cpu_to_node);
+					    pcpu_cpu_distance,
+					    pcpu_fc_alloc, pcpu_fc_free);
 		if (rc < 0)
 			pr_warn("%s allocator failed (%d), falling back to page size\n",
 				pcpu_fc_names[pcpu_chosen_fc], rc);
 	}
 	if (rc < 0)
-		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
+		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+					   pcpu_fc_alloc, pcpu_fc_free,
+					   pcpu_populate_pte);
 	if (rc < 0)
 		panic("cannot initialize percpu area (err=%d)", rc);
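
Note: the embed path above passes pcpu_cpu_distance(), which is defined
elsewhere in numa.c and does not appear in these hunks. As assumed
context only (a sketch following the usual pattern on NUMA ports, not
code taken from this patch), the hook maps a CPU pair to the distance
between their nodes using the same early_cpu_to_node() helper seen in
pcpu_alloc_bootmem():

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	/* Distance between two CPUs is the distance between their nodes. */
	return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}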
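
Note: the page-size fallback additionally requires the architecture to
provide pcpu_populate_pte(), which pre-populates kernel page tables for
the percpu vmalloc area; it too lives outside these hunks. A minimal
sketch of that hook, modeled on the sparc64 implementation of the same
callback (the PAGE_SIZE table allocations and the error handling are
assumptions here, not the LoongArch code):

static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	/* Build each missing page-table level from memblock memory. */
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		pud_t *new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		if (!new)
			goto err_alloc;
		p4d_populate(&init_mm, p4d, new);
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		if (!new)
			goto err_alloc;
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		if (!new)
			goto err_alloc;
		pmd_populate_kernel(&init_mm, pmd, new);
	}

	return;

err_alloc:
	panic("%s: failed to allocate memory\n", __func__);
}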