提交 4ecac3fd 编写于 作者: A Avi Kivity

KVM: Handle virtualization instruction #UD faults during reboot

KVM turns off hardware virtualization extensions during reboot, in order
to disassociate the memory used by the virtualization extensions from the
processor, and in order to have the system in a consistent state.
Unfortunately virtual machines may still be running while this goes on,
and once virtualization extensions are turned off, any virtualization
instruction will #UD on execution.

Fix by adding an exception handler to virtualization instructions; if we get
an exception during reboot, we simply spin waiting for the reset to complete.
If it's a true exception, BUG() so we can have our stack trace.
Signed-off-by: Avi Kivity <avi@qumranet.com>
上级 1b7fcd32
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
#include <asm/desc.h> #include <asm/desc.h>
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet"); MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq) ...@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
static inline void clgi(void) static inline void clgi(void)
{ {
asm volatile (SVM_CLGI); asm volatile (__ex(SVM_CLGI));
} }
static inline void stgi(void) static inline void stgi(void)
{ {
asm volatile (SVM_STGI); asm volatile (__ex(SVM_STGI));
} }
static inline void invlpga(unsigned long addr, u32 asid) static inline void invlpga(unsigned long addr, u32 asid)
{ {
asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid)); asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
} }
static inline unsigned long kvm_read_cr2(void) static inline unsigned long kvm_read_cr2(void)
...@@ -1758,17 +1760,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -1758,17 +1760,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* Enter guest mode */ /* Enter guest mode */
"push %%rax \n\t" "push %%rax \n\t"
"mov %c[vmcb](%[svm]), %%rax \n\t" "mov %c[vmcb](%[svm]), %%rax \n\t"
SVM_VMLOAD "\n\t" __ex(SVM_VMLOAD) "\n\t"
SVM_VMRUN "\n\t" __ex(SVM_VMRUN) "\n\t"
SVM_VMSAVE "\n\t" __ex(SVM_VMSAVE) "\n\t"
"pop %%rax \n\t" "pop %%rax \n\t"
#else #else
/* Enter guest mode */ /* Enter guest mode */
"push %%eax \n\t" "push %%eax \n\t"
"mov %c[vmcb](%[svm]), %%eax \n\t" "mov %c[vmcb](%[svm]), %%eax \n\t"
SVM_VMLOAD "\n\t" __ex(SVM_VMLOAD) "\n\t"
SVM_VMRUN "\n\t" __ex(SVM_VMRUN) "\n\t"
SVM_VMSAVE "\n\t" __ex(SVM_VMSAVE) "\n\t"
"pop %%eax \n\t" "pop %%eax \n\t"
#endif #endif
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/desc.h> #include <asm/desc.h>
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet"); MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -278,7 +280,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) ...@@ -278,7 +280,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
u64 gva; u64 gva;
} operand = { vpid, 0, gva }; } operand = { vpid, 0, gva };
asm volatile (ASM_VMX_INVVPID asm volatile (__ex(ASM_VMX_INVVPID)
/* CF==1 or ZF==1 --> rc = -1 */ /* CF==1 or ZF==1 --> rc = -1 */
"; ja 1f ; ud2 ; 1:" "; ja 1f ; ud2 ; 1:"
: : "a"(&operand), "c"(ext) : "cc", "memory"); : : "a"(&operand), "c"(ext) : "cc", "memory");
...@@ -290,7 +292,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa) ...@@ -290,7 +292,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
u64 eptp, gpa; u64 eptp, gpa;
} operand = {eptp, gpa}; } operand = {eptp, gpa};
asm volatile (ASM_VMX_INVEPT asm volatile (__ex(ASM_VMX_INVEPT)
/* CF==1 or ZF==1 --> rc = -1 */ /* CF==1 or ZF==1 --> rc = -1 */
"; ja 1f ; ud2 ; 1:\n" "; ja 1f ; ud2 ; 1:\n"
: : "a" (&operand), "c" (ext) : "cc", "memory"); : : "a" (&operand), "c" (ext) : "cc", "memory");
...@@ -311,7 +313,7 @@ static void vmcs_clear(struct vmcs *vmcs) ...@@ -311,7 +313,7 @@ static void vmcs_clear(struct vmcs *vmcs)
u64 phys_addr = __pa(vmcs); u64 phys_addr = __pa(vmcs);
u8 error; u8 error;
asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0" asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
: "=g"(error) : "a"(&phys_addr), "m"(phys_addr) : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc", "memory"); : "cc", "memory");
if (error) if (error)
...@@ -378,7 +380,7 @@ static unsigned long vmcs_readl(unsigned long field) ...@@ -378,7 +380,7 @@ static unsigned long vmcs_readl(unsigned long field)
{ {
unsigned long value; unsigned long value;
asm volatile (ASM_VMX_VMREAD_RDX_RAX asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
: "=a"(value) : "d"(field) : "cc"); : "=a"(value) : "d"(field) : "cc");
return value; return value;
} }
...@@ -413,7 +415,7 @@ static void vmcs_writel(unsigned long field, unsigned long value) ...@@ -413,7 +415,7 @@ static void vmcs_writel(unsigned long field, unsigned long value)
{ {
u8 error; u8 error;
asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
: "=q"(error) : "a"(value), "d"(field) : "cc"); : "=q"(error) : "a"(value), "d"(field) : "cc");
if (unlikely(error)) if (unlikely(error))
vmwrite_error(field, value); vmwrite_error(field, value);
...@@ -621,7 +623,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -621,7 +623,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
u8 error; u8 error;
per_cpu(current_vmcs, cpu) = vmx->vmcs; per_cpu(current_vmcs, cpu) = vmx->vmcs;
asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
: "=g"(error) : "a"(&phys_addr), "m"(phys_addr) : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc"); : "cc");
if (error) if (error)
...@@ -1030,13 +1032,14 @@ static void hardware_enable(void *garbage) ...@@ -1030,13 +1032,14 @@ static void hardware_enable(void *garbage)
MSR_IA32_FEATURE_CONTROL_LOCKED | MSR_IA32_FEATURE_CONTROL_LOCKED |
MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc"); : "memory", "cc");
} }
static void hardware_disable(void *garbage) static void hardware_disable(void *garbage)
{ {
asm volatile (ASM_VMX_VMXOFF : : : "cc"); asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
write_cr4(read_cr4() & ~X86_CR4_VMXE); write_cr4(read_cr4() & ~X86_CR4_VMXE);
} }
...@@ -2834,7 +2837,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2834,7 +2837,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
"push %%edx; push %%ebp;" "push %%edx; push %%ebp;"
"push %%ecx \n\t" "push %%ecx \n\t"
#endif #endif
ASM_VMX_VMWRITE_RSP_RDX "\n\t" __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
/* Check if vmlaunch of vmresume is needed */ /* Check if vmlaunch of vmresume is needed */
"cmpl $0, %c[launched](%0) \n\t" "cmpl $0, %c[launched](%0) \n\t"
/* Load guest registers. Don't clobber flags. */ /* Load guest registers. Don't clobber flags. */
...@@ -2869,9 +2872,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2869,9 +2872,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
#endif #endif
/* Enter guest mode */ /* Enter guest mode */
"jne .Llaunched \n\t" "jne .Llaunched \n\t"
ASM_VMX_VMLAUNCH "\n\t" __ex(ASM_VMX_VMLAUNCH) "\n\t"
"jmp .Lkvm_vmx_return \n\t" "jmp .Lkvm_vmx_return \n\t"
".Llaunched: " ASM_VMX_VMRESUME "\n\t" ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: " ".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */ /* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......
...@@ -692,4 +692,28 @@ enum { ...@@ -692,4 +692,28 @@ enum {
trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
vcpu, 0, 0, 0, 0, 0, 0) vcpu, 0, 0, 0, 0, 0, 0)
/*
 * Width of an __ex_table entry: the table stores code addresses, so
 * entries must be pointer-sized (8 bytes on 64-bit, 4 on 32-bit).
 */
#ifdef CONFIG_64BIT
#define KVM_EX_ENTRY ".quad"
#else
#define KVM_EX_ENTRY ".long"
#endif

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_handle_fault_on_reboot(void);

/*
 * Wrap a virtualization instruction with a reboot-time #UD fixup:
 * label 666 marks the wrapped instruction; the out-of-line stub at
 * label 667 pushes the faulting instruction's address and jumps to
 * kvm_handle_fault_on_reboot(); the 666 -> 667 pair is recorded in
 * __ex_table so the fault handler can redirect execution to the stub.
 * NOTE(review): the handler either spins or BUG()s, so the pushed
 * address is for the trace only -- control never returns to 666.
 */
#define __kvm_handle_fault_on_reboot(insn) \
"666: " insn "\n\t" \
".pushsection .text.fixup, \"ax\" \n" \
"667: \n\t" \
"push $666b \n\t" \
"jmp kvm_handle_fault_on_reboot \n\t" \
".popsection \n\t" \
".pushsection __ex_table, \"a\" \n\t" \
KVM_EX_ENTRY " 666b, 667b \n\t" \
".popsection"
#endif #endif
...@@ -65,6 +65,8 @@ struct dentry *kvm_debugfs_dir; ...@@ -65,6 +65,8 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg); unsigned long arg);
bool kvm_rebooting;
static inline int valid_vcpu(int n) static inline int valid_vcpu(int n)
{ {
return likely(n >= 0 && n < KVM_MAX_VCPUS); return likely(n >= 0 && n < KVM_MAX_VCPUS);
...@@ -1301,6 +1303,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, ...@@ -1301,6 +1303,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
return NOTIFY_OK; return NOTIFY_OK;
} }
/*
 * Target of the __kvm_handle_fault_on_reboot() fixup stub: reached when
 * a virtualization instruction takes #UD because the extensions were
 * turned off.  During a reboot that is expected -- park this CPU until
 * the reset completes.  Otherwise it is a genuine fault, so BUG() to
 * get a stack trace.
 */
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting) {
		/* spin while reset goes on */
		for (;;)
			;
	}
	/* Fault while not rebooting. We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val, static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
void *v) void *v)
{ {
...@@ -1310,6 +1324,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, ...@@ -1310,6 +1324,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
* in vmx root mode. * in vmx root mode.
*/ */
printk(KERN_INFO "kvm: exiting hardware virtualization\n"); printk(KERN_INFO "kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
on_each_cpu(hardware_disable, NULL, 1); on_each_cpu(hardware_disable, NULL, 1);
} }
return NOTIFY_OK; return NOTIFY_OK;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册