#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#include <asm/smp.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#endif

#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>

#include "cpu.h"

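/* The active vendor driver for this CPU, set up by get_cpu_vendor() below. */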
static struct cpu_dev *this_cpu __cpuinitdata;

#ifdef CONFIG_X86_64
/*
 * We need valid kernel segments for data and code in long mode too;
 * IRET will check the segment types (kkeil 2000/10/28). SYSRET also
 * mandates a special GDT layout.
 *
 * The TLS descriptors are currently at a different place compared to
 * i386. Hopefully nobody expects them at a fixed place (Wine?).
 */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
#else
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits; the
	 * transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
#endif
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard test to see if a specific EFLAGS bit is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT CPUs allow disabling of CPUID, so the code
	 * below may return different results when it is executed
	 * before and after enabling CPUID. The "volatile" keeps gcc
	 * from optimizing away repeated calls to this function.
	 */
	asm volatile ("pushfl\n\t"
		      "pushfl\n\t"
		      "popl %0\n\t"
		      "movl %0,%1\n\t"
		      "xorl %2,%0\n\t"
		      "pushl %0\n\t"
		      "popfl\n\t"
		      "pushfl\n\t"
		      "popl %0\n\t"
		      "popfl\n\t"
		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, it is not used when CPUID levels
 * 0x80000002..4 are supported.
 */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

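/* Feature bits cleared on the command line via setup_clear_cpu_cap() */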
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/*
 * The current GDT points %fs at the "master" per-cpu area; after
 * this, it points at this CPU's own copy.
 */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
#ifdef CONFIG_X86_32
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
#endif
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	display_cacheinfo(c);
#else
	/* Not much we can do here; check at least whether it has CPUID. */
	if (c->cpuid_level == -1) {
		/* No CPUID: it must be an ancient CPU. */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init	= default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Intel chips right-justify this string for some dumb reason;
	 * undo that brain damage:
	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 the L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
			l2size, ecx & 0xFF);
}

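/*
 * Detect hyper-threading: read the sibling count from CPUID leaf 1 and
 * derive the physical package and core IDs from the APIC ID.
 */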
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > nr_cpu_ids) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
					smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
#ifdef CONFIG_X86_64
		c->phys_proc_id = phys_pkg_id(index_msb);
#else
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
#endif

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

#ifdef CONFIG_X86_64
		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
#else
		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					       ((1 << core_bits) - 1);
#endif
	}

out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

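/* Read the vendor string and family/model/stepping from CPUID leaves 0 and 1. */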
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

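/* Fill the x86_capability words from the standard and extended CPUID leaves. */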
static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

#ifdef CONFIG_X86_64
	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher:
	 * it's a 486 if we can modify the AC flag.
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
#else
	c->x86_clflush_size = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);

#ifdef CONFIG_SMP
	c->cpu_index = boot_cpu_id;
#endif
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk("  %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect.  In the latter case it doesn't even *fail*
 * reliably, so probing for it doesn't even work.  Disable it completely
 * unless we can find a reliable way to detect all the broken cases.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	clear_cpu_cap(c, X86_FEATURE_NOPL);
}

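/* CPUID-based identification steps that are common to all vendors. */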
static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
		c->phys_proc_id = c->initial_apicid;
#endif
	}

	get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

666 667 668 669
#ifdef CONFIG_X86_64
	c->apicid = phys_pkg_id(0);
#endif

L
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	/* Init Machine Check Exception if available. */
	mcheck_init(c);
#endif

	select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#else
	vgetcpu_set_mode();
#endif
}

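/* Called for each AP; never for the boot CPU, hence the BUG_ON() below. */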
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned min;
	unsigned max;
};

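/* MSR ranges dumped by print_cpu_msr() when "show_msr=" is given. */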
static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

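/* Print the vendor, model string and stepping of one CPU. */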
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

#ifdef CONFIG_X86_64
struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

867
static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
868

869
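/* Set up this CPU's PDA and point MSR_GS_BASE at it. */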
void __cpuinit pda_init(int cpu)
870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Setup up data that may be needed in __get_free_pages early */
	loadsegment(fs, 0);
	loadsegment(gs, 0);
	/* Memory clobbers used to order PDA accessed */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack = (unsigned long)stack_thread_info() -
				 PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
		pda->irqstackptr += IRQSTACKSIZE - 64;
	} else {
		if (!pda->irqstackptr) {
			pda->irqstackptr = (char *)
				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
			if (!pda->irqstackptr)
				panic("cannot allocate irqstack for cpu %d",
				      cpu);
			pda->irqstackptr += IRQSTACKSIZE - 64;
		}

		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
			pda->nodenumber = cpu_to_node(cpu);
	}
}

static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
				  DEBUG_STKSZ] __page_aligned_bss;

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis: they
	 * both write to the same internal register. STAR sets CS/DS,
	 * but only for a 32-bit target; LSTAR sets the 64-bit rip.
	 */
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

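/* EFLAGS at boot time, saved in cpu_init() below. */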
unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	return regs;
}
#endif

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, so this function acts as a
 * 'CPU state barrier': nothing should get across.
 * On 64-bit, a lot of state is already set up in pda_init().
 */
#ifdef CONFIG_X86_64
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt();
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();
	if (cpu != 0 && x2apic)
		enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!orig_ist->ist[0]) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			if (cpu) {
				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
				if (!estacks)
					panic("Cannot allocate exception "
					      "stack %ld %d\n", v, cpu);
			}
			estacks += PAGE_SIZE << order[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered.  This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else {
#endif
	/*
	 * Clear all 6 debug registers:
	 */

	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
	/* If the kgdb is connected no debug regs should be altered. */
	}
#endif

	fpu_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	if (cpu_has_xsave)
		current_thread_info()->status = TS_XSAVE;
	else
		current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();

	/*
	 * Boot processor to setup the FP and extended state context info.
	 */
	if (smp_processor_id() == boot_cpu_id)
		init_thread_xstate();

	xsave_init();
}

#endif