#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

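	/* Load the address of the local "1:" label, i.e. the next instruction: */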
	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Combined DTLB/ITLB size, in 4K pages: */
	int			x86_tlbsize;
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
#endif
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
	unsigned int		x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

#define X86_HYPER_VENDOR_NONE  0
#define X86_HYPER_VENDOR_VMWARE 1

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
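	/* Writing %cr3 also flushes all non-global TLB entries: */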
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
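/*
 * Note: 65536 bits cover the full 16-bit I/O port space, i.e. 8192 bytes:
 * 2048 longs on 32-bit, 1024 longs on 64-bit.
 */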

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long		io_bitmap_max;
	struct thread_struct	*io_bitmap_owner;

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

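/* Hardware reset value of MXCSR: all SIMD FP exceptions masked, round-to-nearest: */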
#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]:		*/
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
329
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct info		*info;
	u32			entry_eip;
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
	unsigned long		ip;
	unsigned long		fs;
	unsigned long		gs;
	/* Hardware debugging registers: */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set.  */
	unsigned long	debugctlmsr;
#ifdef CONFIG_X86_DS
/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
	struct ds_context	*ds_ctx;
#endif /* CONFIG_X86_DS */
#ifdef CONFIG_X86_PTRACE_BTS
/* the signal to send on a bts buffer overflow */
	unsigned int	bts_ovfl_signal;
#endif /* CONFIG_X86_PTRACE_BTS */
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		asm("mov %0, %%db7"	::"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long		mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
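
/*
 * Example (illustrative only): enabling a cr4 feature bit on the boot CPU
 * while recording it in mmu_cr4_features for CPUs that boot later:
 *
 *	set_in_cr4(X86_CR4_PGE);
 */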

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
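
/*
 * Example (illustrative only): CPUID leaf 0 returns the highest standard
 * leaf in EAX and the CPU vendor string in EBX:EDX:ECX:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 */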

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution: */
static inline void sync_core(void)
{
	int tmp;

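	/* cpuid is a serializing instruction; any leaf works, leaf 1 is always present: */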
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
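	/*
	 * The opcode is emitted as raw bytes, presumably so that
	 * assemblers without monitor/mwait support can build this:
	 */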
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;

/*
 * On systems with caches, the caches must be flushed as the absolute
 * last instruction before going into a suspended halt.  Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * Perform a variety of operations to guarantee that the compiler
 * will not reorder instructions.  wbinvd itself is serializing,
 * so the processor will not reorder.
 *
 * Systems without a cache can just go into halt.
 */
static inline void wbinvd_halt(void)
{
	mb();
	/* check for clflush to determine if wbinvd is legal */
	if (cpu_has_clflush)
		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
	else
		while (1)
			halt();
}

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
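	/* The DEBUGCTL MSR only exists on family 6 (P6) and later: */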
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

/*
 * From the system description table in the BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
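/*
 * alternative_input() patches in the SSE prefetchnta at boot when the
 * CPU advertises X86_FEATURE_XMM; otherwise BASE_PREFETCH stays in place.
 */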
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.fs			= __KERNEL_PERCPU,			  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE64)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE64

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

#endif /* _ASM_X86_PROCESSOR_H */