#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

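/* Per-cpu copy of each CPU's local APIC id, seeded from the early boot array */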
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
		per_cpu(x86_bios_cpu_apicid, cpu) =
						x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
	}

	/* indicate the early static arrays will soon be gone */
	x86_cpu_to_apicid_early_ptr = NULL;
	x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}

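/*
 * Optional table of constant cpumasks, one per possible CPU, each with only
 * that CPU's bit set, so cpumask_of_cpu(cpu) can hand back a ready-made mask
 * instead of building one on the stack.
 */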
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	int i, highest_cpu = 0;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
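	/* mark extra CPUs as possible so CPUs hotplugged later get an area too */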
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
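	/* PERCPU_ENOUGH_ROOM: static per-cpu section size plus reserve for modules */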
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(i) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);
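		/* per-cpu data is not live yet, so use the early boot-time node map */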
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node or node-local memory\n", i);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		highest_cpu = i;
	}

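	/* cap later loops at the highest possible CPU + 1 instead of NR_CPUS */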
	nr_cpu_ids = highest_cpu + 1;
	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif