#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot-up */
unsigned int boot_cpu_physical_apicid = -1U;
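/* Highest physical APIC ID found while enumerating processors */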
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
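/* Table of constant cpumasks: entry i has only cpu i set (see setup below) */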
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
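		/* the boot cpu keeps its static pda, so allocate one fewer */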
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table; only the boot cpu keeps its static pda */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

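	/* fix up cpu_possible_map and nr_cpu_ids before sizing per-cpu data */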
#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#else
	nr_cpu_ids = num_processors;
#endif

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
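		/* record this cpu's offset and seed its area from the reference section */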
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids: %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif

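/*
 * Walk the setup_data list passed in by the boot loader (boot protocol
 * 2.09+), dispatch each entry by type, and free the entry afterwards
 * unless boot-params debugging wants to keep it around.
 */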
void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_ioremap(pa_data, PAGE_SIZE);
		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data, pa_data);
			break;
		default:
			break;
		}
#ifndef CONFIG_DEBUG_BOOT_PARAMS
		free_early(pa_data, pa_data + sizeof(*data) + data->len);
#endif
		pa_data = data->next;
		early_iounmap(data, PAGE_SIZE);
	}
}

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

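/*
 * Record @cpu's node in the pda and in whichever cpu-to-node map is
 * currently live: the early boot map or the per-cpu variable.
 */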
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

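/* fast versions: update the node's cpumask without any sanity checking */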
void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

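/*
 * Debug version of cpu_to_node(): warn and fall back to the early map
 * if used before the per-cpu areas are set up.
 */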
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return &cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */