#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of the macro that returns the current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}
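
/*
 * Note on the asm above: "mov $1f, %0" loads the address of the local
 * label "1:", which is placed immediately after the mov, so @pc ends up
 * holding the address of the instruction following the mov.
 */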

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern s8  __read_mostly tlb_flushall_shift;

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hard_math;
	char			rfu;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif
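
/*
 * Illustrative use (a sketch, not code from this header): per-CPU
 * cpuinfo is normally consumed through cpu_data(), e.g.:
 *
 *	struct cpuinfo_x86 *c = &cpu_data(cpu);
 *	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x10)
 *		...
 */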

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
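
/*
 * Illustrative use: leaf 0 returns the highest standard leaf in EAX and
 * the vendor string in EBX:EDX:ECX ("GenuineIntel", "AuthenticAMD", ...):
 *
 *	unsigned int eax = 0, ebx, ecx, edx;
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 */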

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
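
/*
 * The arithmetic: 65536 ports at one bit each gives an 8192-byte bitmap
 * (2048 longs on 32-bit, 1024 on 64-bit).  Pointing io_bitmap_base at
 * INVALID_IO_BITMAP_OFFSET places the bitmap beyond the TSS limit, so
 * any port access not already allowed by IOPL raises #GP instead of
 * being checked against a bitmap.
 */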

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define	MXCSR_DEFAULT		0x1f80
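/* 0x1f80: all six SSE exceptions masked, round-to-nearest, FZ/DAZ clear */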

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
	u32			st_space[20];

	/* Software status information [not touched by FSAVE ]:		*/
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
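
/*
 * What the sequence above does: EFLAGS is pulled onto the stack with
 * pushfl, the two IOPL bits (X86_EFLAGS_IOPL, mask 0x3000) are cleared
 * and replaced by @mask, and the result is loaded back with popfl.  On
 * 64-bit this compiles to nothing; there the iopl syscall updates the
 * saved EFLAGS image instead.
 */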

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
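
/*
 * Illustrative use: leaf 4 (Intel deterministic cache parameters) is
 * such a counted leaf; subleaves are enumerated until the cache-type
 * field in EAX[4:0] reads 0:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 */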

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
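
/*
 * Illustrative use of the single-datum helpers: the maximum extended
 * leaf comes back in EAX of leaf 0x80000000, and leaf 0x80000008
 * reports the physical address width in its low byte:
 *
 *	if (cpuid_eax(0x80000000) >= 0x80000008)
 *		phys_bits = cpuid_eax(0x80000008) & 0xff;
 */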

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump.  The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}
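
/*
 * For orientation: the usual callers are the code-patching paths
 * (alternatives, kprobes and friends), which run sync_core() after
 * rewriting instructions so that the serializing CPUID discards any
 * stale prefetched copies.
 */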

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
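
/*
 * How these pair up in an idle loop (a sketch, not the actual
 * mwait_idle() implementation):
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);
 *	if (!need_resched())
 *		__mwait(0, 0);
 *
 * MONITOR arms a write-watch on the given address range; MWAIT then
 * idles until that range is written (e.g. a wakeup setting
 * TIF_NEED_RESCHED) or an interrupt arrives.
 */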

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
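
/*
 * Illustrative use: block stepping toggles the BTF bit (trap on
 * branches rather than on every instruction) in this MSR:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */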

extern void set_task_blockstep(struct task_struct *task, bool on);

/*
 * From the system description table in the BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern u16 amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}
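
/*
 * Worked example: if APERF advanced by 512 and MPERF by 1024 between
 * two samples, mperf >> 10 is 1 and the returned ratio is 512, i.e.
 * 512/2^APERFMPERF_SHIFT = 0.5 of the reference (non-turbo) frequency,
 * in 10-bit fixed point.
 */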

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);

#endif /* _ASM_X86_PROCESSOR_H */