/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

16 17
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
18
extern inline unsigned long native_save_fl(void)
19 20 21
{
	unsigned long flags;

22
	/*
23 24 25
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
26
	 */
27 28
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
29
		     : "=rm" (flags)
30 31
		     : /* no input */
		     : "memory");
32 33 34 35

	return flags;
}

36 37
extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
38
{
39 40 41 42
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
43 44 45 46 47 48 49 50 51 52 53 54
}

/* Mask maskable interrupts on this CPU (CLI). */
static inline void native_irq_disable(void)
{
	asm volatile("cli" : : : "memory");
}

/* Unmask maskable interrupts on this CPU (STI). */
static inline void native_irq_enable(void)
{
	asm volatile("sti" : : : "memory");
}

55
static inline __cpuidle void native_safe_halt(void)
56 57 58 59
{
	asm volatile("sti; hlt": : :"memory");
}

60
static inline __cpuidle void native_halt(void)
61 62 63 64 65 66 67 68 69 70
{
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>
73
static inline notrace unsigned long arch_local_save_flags(void)
74 75 76 77
{
	return native_save_fl();
}

78
static inline notrace void arch_local_irq_restore(unsigned long flags)
79 80 81 82
{
	native_restore_fl(flags);
}

83
static inline notrace void arch_local_irq_disable(void)
84 85 86 87
{
	native_irq_disable();
}

88
static inline notrace void arch_local_irq_enable(void)
89 90 91 92 93 94 95 96
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
97
static inline __cpuidle void arch_safe_halt(void)
98 99 100 101 102 103 104 105
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
106
static inline __cpuidle void halt(void)
107 108 109 110 111 112 113
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
114
static inline notrace unsigned long arch_local_irq_save(void)
115
{
D
David Howells 已提交
116 117
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
118 119 120 121 122 123 124 125
	return flags;
}
#else

/* Assembler-side native equivalents of the paravirt interrupt hooks. */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif
#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
D
David Howells 已提交
162
static inline int arch_irqs_disabled_flags(unsigned long flags)
163 164 165 166
{
	return !(flags & X86_EFLAGS_IF);
}

/* arch_irqs_disabled - non-zero if interrupts are currently disabled. */
static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ \
	TRACE_IRQS_ON; \
	sti; \
	call lockdep_sys_exit_thunk; \
	cli; \
	TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT \
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif