#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
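
/*
 * Rough sketch of how these are consumed (based on the early-per-cpu
 * helpers in asm/percpu.h; details may differ): until
 * setup_per_cpu_areas() runs, each variable is backed by a static
 * NR_CPUS-sized array, and readers go through early_per_cpu():
 *
 *	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 *	// uses the early map while early_per_cpu_ptr() is non-NULL,
 *	// the real per-cpu copy once setup_per_cpu_maps() clears it
 */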

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of pda into
 * %gs.
 *
 * On SMP, the pda offset also doubles as the percpu base address and
 * thus it should be at the start of the per-cpu area.  To achieve this,
 * it's preallocated in vmlinux_64.lds.S directly instead of using
 * DEFINE_PER_CPU().
 */
#ifdef CONFIG_X86_64
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}
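
/*
 * Rough picture (not from the original source): once GS_BASE points at
 * this cpu's pda -- which on SMP is also the per-cpu base -- per-cpu
 * accessors can reach their data with a single %gs-relative memory
 * operand instead of an offset-table lookup.
 */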
#ifndef CONFIG_SMP
DEFINE_PER_CPU(struct x8664_pda, __pda);
#endif
EXPORT_PER_CPU_SYMBOL(__pda);
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
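
/*
 * Note (assumption, per <linux/cpumask.h>): alloc_bootmem_cpumask_var()
 * allocates real storage only when CONFIG_CPUMASK_OFFSTACK=y; otherwise
 * cpumask_var_t is a plain array type and these calls are no-ops.
 */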

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
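/*
 * Sketch of the rationale (assumption): the boot cpu runs on the initial
 * per-cpu image loaded at __per_cpu_load until setup_per_cpu_areas()
 * hands out real per-cpu areas, so entry 0 is seeded at build time; the
 * remaining entries are filled in by setup_per_cpu_areas() below.
 */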
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
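
/*
 * Rough sketch of how this table is consumed (based on the generic
 * accessors in asm-generic/percpu.h; not verbatim): per_cpu(var, cpu)
 * shifts the variable's link-time per-cpu address by this cpu's offset,
 * approximately
 *
 *	per_cpu(x, cpu) ~= *(typeof(x) *)((char *)&per_cpu__x
 *					   + __per_cpu_offset[cpu])
 */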

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				 __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					 __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
							__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
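		/*
		 * Record where this cpu's copy landed: per_cpu(var, cpu) is,
		 * in effect, &var plus per_cpu_offset(cpu), so the offset is
		 * the distance from the linked per-cpu section to the newly
		 * copied area.
		 */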
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			(char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * CPU0 has already modified its pda in the init data area;
		 * reload the pda offset for CPU0 and clear the freshly
		 * copied pda of every other cpu.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif
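
/*
 * Note (assumption): when CONFIG_HAVE_SETUP_PER_CPU_AREA is not set, the
 * generic setup_per_cpu_areas() in init/main.c is used instead of the
 * arch-specific version above.
 */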

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}


/* empty cpumask: safe fallback returned for out-of-range nodes */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function returns the cpumask by value, so with a high
 * NR_CPUS count it consumes a large amount of stack space.  Prefer the
 * node_to_cpumask_ptr interface whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */