/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head_32.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct x86_hw_tss	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
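/*
 * Worked example (illustrative): with x86_cache_bits == 46 and
 * PAGE_SHIFT == 12, the limit is BIT_ULL(46 - 1 - 12) = 1ULL << 33,
 * i.e. the first PFN whose physical address would set the highest
 * cache-internal address bit; PFNs at or above this are rejected for
 * user mappings under the L1TF mitigation.
 */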

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
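/*
 * Usage sketch (illustrative, not part of this header): CPUID leaf 0
 * reports the maximum standard leaf in EAX and the vendor string in
 * EBX/EDX/ECX, so:
 *
 *	unsigned int max_leaf = native_cpuid_eax(0);
 *
 *	unsigned int eax = 0, ebx, ecx = 0, edx;
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	// ebx/edx/ecx now hold e.g. "Genu" "ineI" "ntel"
 */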

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}
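/*
 * Usage sketch (illustrative): CR3 also carries PCID/flag bits, which
 * read_cr3_pa() masks off, so the result can go straight through the
 * direct map to reach the current page-table root:
 *
 *	pgd_t *pgd = (pgd_t *)__va(read_cr3_pa());
 */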

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;

	/*
	 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
	 * Linux does not use ring 1, so sp1 is not otherwise needed.
	 */
	u64			sp1;

	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		(offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct entry_stack {
	unsigned long		words[64];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

struct tss_struct {
	/*
	 * The fixed hardware portion.  This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/*
 * The extra sizeof(unsigned long) accounts for the additional "long"
 * at the end of the io_bitmap.
 *
 * The -1 is because the segment base+limit pair must point at the
 * address of the last valid byte.
 */
#define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
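/*
 * Worked example (illustrative): IO_BITMAP_BYTES is 65536/8 = 8192,
 * so with an 8-byte long the limit is IO_BITMAP_OFFSET + 8192 + 8 - 1,
 * the offset of the very last byte of io_bitmap[] within the TSS
 * segment, exactly what a base+limit pair is defined to address.
 */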

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
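/*
 * A compile-time sanity check one could write for the layout described
 * above (a sketch; the kernel's real checks live in C files elsewhere):
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 *
 * i.e. the canary really is the object GCC addresses as %gs:40.
 */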

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(irq_stack_union.gs_base, cpu);
}

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);

#if IS_ENABLED(CONFIG_KVM)
/* Save actual FS/GS selectors and bases to current->thread */
void save_fsgs_for_kvm(void);
#endif
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32                     stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/* Whitelist the FPU state from the task_struct for hardened usercopy. */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fpu.state);
	*size = fpu_kernel_xstate_size;
}

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
	/*
	 *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 *  and around vm86 mode and sp0 on x86_64 is special because of the
	 *  entry trampoline.
	 */
	return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
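/*
 * Usage sketch (illustrative): cpuid_count() is for sub-leaf aware
 * enumeration, e.g. walking CPUID leaf 4 (deterministic cache
 * parameters), where ECX selects the cache being described:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int i;
 *
 *	for (i = 0; ; i++) {
 *		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
 *		if (!(eax & 0x1f))	// cache type 0: no more caches
 *			break;
 *	}
 */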

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		UNWIND_HINT_SAVE
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		UNWIND_HINT_RESTORE
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}
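/*
 * Usage sketch for case b) above (illustrative): after cross-modifying
 * code, make every other CPU serialize before it can run the new
 * instructions:
 *
 *	static void do_sync_core(void *info)
 *	{
 *		sync_core();
 *	}
 *
 *	on_each_cpu(do_sync_core, NULL, 1);
 */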

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
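/*
 * Usage sketch (illustrative): read-modify-write of DEBUGCTLMSR, e.g.
 * turning on branch-trap (single-step-on-branches) mode:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */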

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
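/*
 * Worked example (illustrative): the macro above points one struct
 * pt_regs below the padded top of the task's stack, so the saved user
 * stack pointer of a stopped task is simply:
 *
 *	unsigned long user_sp = task_pt_regs(task)->sp;
 *
 * which is how KSTK_ESP() is defined for 32-bit below.
 */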

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET	PAGE_OFFSET
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_LOW		TASK_SIZE
#define TASK_SIZE_MAX		TASK_SIZE
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.addr_limit		= KERNEL_DS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything executable
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
#define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE_LOW		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.addr_limit		= KERNEL_DS,			\
}

extern unsigned long KSTK_ESP(struct task_struct *task);

910 911
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

#ifdef CONFIG_CPU_SUP_AMD
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
#endif

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
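/*
 * Usage sketch (illustrative): a guest probing for KVM passes the
 * 12-byte signature that KVM advertises in the 0x40000000 leaf range:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		;	// running on KVM
 */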

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(void *begin, void *end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
};

#endif /* _ASM_X86_PROCESSOR_H */