/* setup_percpu.c */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

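/*
 * DEFINE_EARLY_PER_CPU pairs each per-cpu variable with a static
 * boot-time array; the early_per_cpu*() accessors use that array
 * until setup_per_cpu_maps() below copies the values into the
 * per-cpu areas and zeroes the early pointers.
 */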
/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
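/*
 * Each cpumask_of_cpu_map[i] has only bit i set (see
 * setup_cpumask_of_cpu() below), which lets cpumask_of_cpu() refer to
 * a preallocated constant mask instead of constructing a one-bit mask
 * on the stack.
 */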
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
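		/*
		 * cpu 0 keeps its statically allocated pda (see the loop
		 * below), so only nr_cpu_ids - 1 new pdas are needed.
		 */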
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
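		/* prefer memory local to this cpu's node, if it has any */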
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
		}
#endif
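		/*
		 * per_cpu_offset(cpu) is the delta added to a per-cpu
		 * variable's address to reach this cpu's copy of it.
		 */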
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids: %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA && CONFIG_X86_SMP */

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
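		/* nr_node_ids is one more than the highest possible node */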
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

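	/*
	 * Record the mapping in whichever table exists at this point:
	 * the early boot array, or this cpu's per-cpu area once it
	 * has been set up.
	 */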
	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */