提交 aac82d31 编写于 作者: A Andy Lutomirski 提交者: Ingo Molnar

x86, paravirt, xen: Remove the 64-bit ->irq_enable_sysexit() pvop

We don't use irq_enable_sysexit on 64-bit kernels any more.
Remove all the paravirt and Xen machinery to support it on
64-bit kernels.
Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/8a03355698fe5b94194e9e7360f19f91c1b2cf1f.1428100853.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 6a907738
...@@ -77,12 +77,6 @@ ENTRY(native_usergs_sysret32) ...@@ -77,12 +77,6 @@ ENTRY(native_usergs_sysret32)
swapgs swapgs
sysretl sysretl
ENDPROC(native_usergs_sysret32) ENDPROC(native_usergs_sysret32)
/*
 * Native SYSEXIT return path for 32-bit compat userspace.
 * Restores the user GS base, re-enables interrupts, and returns to
 * userspace via SYSEXIT.  This commit removes it: it is no longer
 * used on 64-bit kernels.
 */
ENTRY(native_irq_enable_sysexit)
swapgs	/* switch GS base back to the user value */
sti	/* re-enable interrupts; STI's one-insn shadow covers sysexit */
sysexit
ENDPROC(native_irq_enable_sysexit)
#endif #endif
/* /*
......
...@@ -160,13 +160,14 @@ struct pv_cpu_ops { ...@@ -160,13 +160,14 @@ struct pv_cpu_ops {
u64 (*read_pmc)(int counter); u64 (*read_pmc)(int counter);
unsigned long long (*read_tscp)(unsigned int *aux); unsigned long long (*read_tscp)(unsigned int *aux);
#ifdef CONFIG_X86_32
/* /*
* Atomically enable interrupts and return to userspace. This * Atomically enable interrupts and return to userspace. This
* is only ever used to return to 32-bit processes; in a * is only used in 32-bit kernels. 64-bit kernels use
* 64-bit kernel, it's used for 32-on-64 compat processes, but * usergs_sysret32 instead.
* never native 64-bit processes. (Jump, not call.)
*/ */
void (*irq_enable_sysexit)(void); void (*irq_enable_sysexit)(void);
#endif
/* /*
* Switch to usermode gs and return to 64-bit usermode using * Switch to usermode gs and return to 64-bit usermode using
......
...@@ -68,7 +68,9 @@ void common(void) { ...@@ -68,7 +68,9 @@ void common(void) {
OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
OFFSET(PV_CPU_iret, pv_cpu_ops, iret); OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
#ifdef CONFIG_X86_32
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
#endif
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
#endif #endif
......
...@@ -154,7 +154,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, ...@@ -154,7 +154,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
ret = paravirt_patch_ident_64(insnbuf, len); ret = paravirt_patch_ident_64(insnbuf, len);
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
#ifdef CONFIG_X86_32
type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
#endif
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
/* If operation requires a jmp, then jmp */ /* If operation requires a jmp, then jmp */
...@@ -371,7 +373,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = { ...@@ -371,7 +373,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
.load_sp0 = native_load_sp0, .load_sp0 = native_load_sp0,
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) #if defined(CONFIG_X86_32)
.irq_enable_sysexit = native_irq_enable_sysexit, .irq_enable_sysexit = native_irq_enable_sysexit,
#endif #endif
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......
...@@ -49,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, ...@@ -49,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_irq_ops, save_fl); PATCH_SITE(pv_irq_ops, save_fl);
PATCH_SITE(pv_irq_ops, irq_enable); PATCH_SITE(pv_irq_ops, irq_enable);
PATCH_SITE(pv_irq_ops, irq_disable); PATCH_SITE(pv_irq_ops, irq_disable);
PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
PATCH_SITE(pv_cpu_ops, usergs_sysret32); PATCH_SITE(pv_cpu_ops, usergs_sysret32);
PATCH_SITE(pv_cpu_ops, usergs_sysret64); PATCH_SITE(pv_cpu_ops, usergs_sysret64);
PATCH_SITE(pv_cpu_ops, swapgs); PATCH_SITE(pv_cpu_ops, swapgs);
......
...@@ -1267,10 +1267,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { ...@@ -1267,10 +1267,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.read_tscp = native_read_tscp, .read_tscp = native_read_tscp,
.iret = xen_iret, .iret = xen_iret,
.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
.usergs_sysret32 = xen_sysret32, .usergs_sysret32 = xen_sysret32,
.usergs_sysret64 = xen_sysret64, .usergs_sysret64 = xen_sysret64,
#else
.irq_enable_sysexit = xen_sysexit,
#endif #endif
.load_tr_desc = paravirt_nop, .load_tr_desc = paravirt_nop,
......
...@@ -47,22 +47,6 @@ ENTRY(xen_iret) ...@@ -47,22 +47,6 @@ ENTRY(xen_iret)
ENDPATCH(xen_iret) ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1) RELOC(xen_iret, 1b+1)
/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 *
 * Xen guests cannot execute SYSEXIT directly; instead this builds a
 * return frame on the stack and jumps to hypercall_iret so the
 * hypervisor performs the return to userspace.  Removed by this
 * commit along with the 64-bit irq_enable_sysexit pvop.
 */
ENTRY(xen_sysexit)
pushq $__USER32_DS	/* user SS: 32-bit data segment */
pushq %rcx		/* user stack pointer (sysexit convention) */
pushq $X86_EFLAGS_IF	/* user EFLAGS with interrupts enabled */
pushq $__USER32_CS	/* user CS: 32-bit code segment */
pushq %rdx		/* user return address (sysexit convention) */
pushq $0		/* hypercall_iret flags word — presumably no VGCF flags; see xen_sysret64 */
1: jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)
ENTRY(xen_sysret64) ENTRY(xen_sysret64)
/* /*
* We're already on the usermode stack at this point, but * We're already on the usermode stack at this point, but
......
...@@ -134,7 +134,9 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long); ...@@ -134,7 +134,9 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
/* These are not functions, and cannot be called normally */ /* These are not functions, and cannot be called normally */
__visible void xen_iret(void); __visible void xen_iret(void);
#ifdef CONFIG_X86_32
__visible void xen_sysexit(void); __visible void xen_sysexit(void);
#endif
__visible void xen_sysret32(void); __visible void xen_sysret32(void);
__visible void xen_sysret64(void); __visible void xen_sysret64(void);
__visible void xen_adjust_exception_frame(void); __visible void xen_adjust_exception_frame(void);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册