#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							\
	"movl %P[task_canary](%[next]), %%ebx\n\t"			\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam						\
	, [stack_canary] "=m" (per_cpu_var(stack_canary))
#define __switch_canary_iparam						\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
		       							\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
		       							\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
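/*
 * Illustrative sketch, not part of the original header: the expected
 * caller is the scheduler's context_switch(), roughly
 *
 *	switch_to(prev, next, prev);
 *
 * where, after the switch, "last" (here reusing prev) names the task
 * that was running on this CPU before the new task resumed.
 */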

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam						  \
	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam						  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/* Save and restore flags to clear NT and keep it from leaking between tasks */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     ".globl thread_return\n"					  \
	     "thread_return:\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t" 					  \
	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"	  \
	     "jnz   ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)					  	  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),			  	  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
	       [current_task] "m" (per_cpu_var(current_task))		  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0) : "memory")


/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}
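/*
 * get_limit() uses LSL ("load segment limit") and adds one, so the
 * result is the size in bytes of the segment named by the selector.
 * Illustrative call (caller-side code, assumed): get_limit(regs->cs).
 */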

static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. The solution is
 * to use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#define load_gs_index   native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)
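/*
 * Illustrative pattern (caller-side code, assumed): clts()/stts()
 * bracket kernel FPU use, clearing CR0.TS so FP/SIMD instructions do
 * not trap and setting it again afterwards:
 *
 *	clts();			// FP instructions no longer trap
 *	... use FPU/SIMD state ...
 *	stts();			// re-arm the device-not-available trap
 */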

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
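/*
 * Illustrative sketch of flushing a whole range (variables assumed;
 * this is roughly the pattern used by clflush_cache_range()):
 *
 *	void *p;
 *
 *	mb();
 *	for (p = start; p < end; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 *	mb();
 */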

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() 	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() 	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
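/*
 * Illustrative sketch (variables assumed): pairing smp_wmb() with
 * smp_rmb() to publish data to another CPU:
 *
 *	CPU 0				CPU 1
 *
 *	data = compute();		if (ready) {
 *	smp_wmb();				smp_rmb();
 *	ready = 1;				use(data);
 *					}
 */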

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use an alternative three way for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
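/*
 * Illustrative use (caller-side code, assumed): fence the TSC reads so
 * they cannot be speculated into or out of the region being timed:
 *
 *	rdtsc_barrier();
 *	t1 = get_cycles();
 *	... code being timed ...
 *	rdtsc_barrier();
 *	t2 = get_cycles();
 */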

#endif /* _ASM_X86_SYSTEM_H */