Commit 71999d98 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

x86/paravirt: Use normal calling sequences for irq enable/disable

Bastian Blank reported a boot crash with stackprotector enabled,
and debugged it back to edx register corruption.

For historical reasons, irq enable/disable/save/restore had special
calling sequences to make them more efficient.  With the more recent
introduction of higher-level and more general optimisations, this is
no longer necessary, so we can just use the normal PVOP_ macros.
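
The sketch below shows, under stated assumptions, what a "normal"
callee-save PVOP call site boils down to; it is not the kernel's
actual macro expansion, and pv_save_fl_stub is a hypothetical stand-in
for the patched paravirt target.  The point is that the asm statement
itself declares the only register the callee may scratch, so the
compiler keeps everything else live across the call:

	/* Sketch only, not the real PVOP_CALLEE0 expansion. */
	static inline unsigned long save_fl_sketch(void)
	{
		unsigned long f;

		asm volatile("call pv_save_fl_stub"
			     : "=a" (f)		/* return value in [re]ax */
			     :			/* no inputs */
			     : "memory", "cc");	/* nothing else scratched */
		return f;
	}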

This fixes some residual bugs in the old implementations which left
edx liable to inadvertent clobbering.  It also fixes some bugs in
__PVOP_VCALLEESAVE which were revealed by actual use.
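
A hypothetical sketch of the bug class (pv_irq_disable_stub is an
assumed name; this is not the exact broken code): if the sequence
reached by the call scratches %edx but the clobber list only admits to
%eax, the compiler may keep a live value, e.g. part of a stackprotector
computation, in %edx across the call and read back corrupted data:

	/* Sketch of the bug class, not the original kernel code. */
	static inline void buggy_irq_disable_sketch(void)
	{
		asm volatile("call pv_irq_disable_stub"
			     :				/* no outputs */
			     :				/* no inputs */
			     : "memory", "eax", "cc");	/* BUG: no "edx" */
	}
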
Reported-by: Bastian Blank <bastian@waldi.eu.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stable Kernel <stable@kernel.org>
Cc: Xen-devel <xen-devel@lists.xensource.com>
LKML-Reference: <4AD3BC9B.7040501@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent d1705c55
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long f;
-
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : paravirt_type(pv_irq_ops.save_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
-	return f;
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : PV_FLAGS_ARG(f),
-		       paravirt_type(pv_irq_ops.restore_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_disable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_enable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
...
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS					\
 	unsigned long __edi = __edi, __esi = __esi,	\
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+		__edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS
 #define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
 		"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
 		      VEXTRA_CLOBBERS,			\
 		      pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)		\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,		\
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)			\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,			\
 		      PVOP_VCALLEE_CLOBBERS, ,			\
 		      pre, post, ##__VA_ARGS__)
...
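
A knock-on effect of the last hunk: __PVOP_VCALLEESAVE no longer takes
a rettype, so the void wrappers used by the first hunk's call sites
expand through the VCALL path.  Roughly, as a sketch based on the
post-patch macros (exact definitions vary across kernel versions):

	/* Sketch of the post-patch wrappers; spelling may differ by version. */
	#define PVOP_VCALLEE0(op)					\
		__PVOP_VCALLEESAVE(op, "", "")
	#define PVOP_VCALLEE1(op, arg1)					\
		__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))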