Commit fd8ca6da authored by Uros Bizjak, committed by Paolo Bonzini

KVM/x86: Use CC_SET()/CC_OUT in arch/x86/kvm/vmx.c

Replace open-coded uses of set instructions with CC_SET()/CC_OUT() in
arch/x86/kvm/vmx.c.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
[Mark error paths as unlikely while touching this. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent aaffcfd1
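For reference, CC_SET() and CC_OUT() are defined in arch/x86/include/asm/asm.h. When the compiler advertises flag-output support (__GCC_ASM_FLAG_OUTPUTS__, available since GCC 6), the condition code becomes a direct asm output and the compiler can branch on the flags itself, with no set<cc> instruction and no "cc" clobber; older compilers fall back to the same open-coded set<cc> this patch removes. Below is a minimal compilable sketch of the pattern: the macro bodies mirror the kernel header, while cmp_not_above() is a made-up demo function, not kernel code. "na" (not above) is true when CF=1 or ZF=1, exactly the flag pair VMX instructions use to report failure.

#include <stdbool.h>
#include <stdio.h>

#ifdef __GCC_ASM_FLAG_OUTPUTS__
/* Flag-output aware compiler: the condition code is an asm output itself. */
# define CC_SET(c) "\n\t/* output condition code " #c " */\n"
# define CC_OUT(c) "=@cc" #c
#else
/* Fallback: open-code the set<cc> instruction, as vmx.c did before. */
# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
# define CC_OUT(c) [_cc_ ## c] "=qm"
#endif

/* After CMP, "not above" (CF=1 or ZF=1) means a <= b, unsigned. */
static bool cmp_not_above(unsigned long a, unsigned long b)
{
	bool error;

	asm ("cmp %2, %1" CC_SET(na)
	     : CC_OUT(na) (error)
	     : "r" (a), "r" (b));
	return error;
}

int main(void)
{
	printf("3 <= 5 -> %d\n", cmp_not_above(3, 5));	/* prints 1 */
	printf("7 <= 5 -> %d\n", cmp_not_above(7, 5));	/* prints 0 */
	return 0;
}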
arch/x86/kvm/vmx.c

@@ -38,6 +38,7 @@
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
+#include <asm/asm.h>
 #include <asm/cpu.h>
 #include <asm/io.h>
 #include <asm/desc.h>
@@ -1916,11 +1917,12 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
 		u64 rsvd : 48;
 		u64 gva;
 	} operand = { vpid, 0, gva };
+	bool error;
 
-	asm volatile (__ex(ASM_VMX_INVVPID)
-		  /* CF==1 or ZF==1 --> rc = -1 */
-		  "; ja 1f ; ud2 ; 1:"
-		  : : "a"(&operand), "c"(ext) : "cc", "memory");
+	asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
+		      : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
+		      : "memory");
+	BUG_ON(error);
 }
 
 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
@@ -1928,11 +1930,12 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
 	struct {
 		u64 eptp, gpa;
 	} operand = {eptp, gpa};
+	bool error;
 
-	asm volatile (__ex(ASM_VMX_INVEPT)
-		  /* CF==1 or ZF==1 --> rc = -1 */
-		  "; ja 1f ; ud2 ; 1:\n"
-		  : : "a" (&operand), "c" (ext) : "cc", "memory");
+	asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
+		      : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
+		      : "memory");
+	BUG_ON(error);
 }
 
 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
@@ -1948,12 +1951,12 @@ static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 static void vmcs_clear(struct vmcs *vmcs)
 {
 	u64 phys_addr = __pa(vmcs);
-	u8 error;
+	bool error;
 
-	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
-		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
-		      : "cc", "memory");
-	if (error)
+	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
+		      : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+		      : "memory");
+	if (unlikely(error))
 		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
 		       vmcs, phys_addr);
 }
@@ -1970,15 +1973,15 @@ static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 static void vmcs_load(struct vmcs *vmcs)
 {
 	u64 phys_addr = __pa(vmcs);
-	u8 error;
+	bool error;
 
 	if (static_branch_unlikely(&enable_evmcs))
 		return evmcs_load(phys_addr);
 
-	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
-		      : "cc", "memory");
-	if (error)
+	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
+		      : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+		      : "memory");
+	if (unlikely(error))
 		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
 		       vmcs, phys_addr);
 }
@@ -2203,10 +2206,10 @@ static noinline void vmwrite_error(unsigned long field, unsigned long value)
 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
 {
-	u8 error;
+	bool error;
 
-	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
-		      : "=q"(error) : "a"(value), "d"(field) : "cc");
+	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
+		      : CC_OUT(na) (error) : "a"(value), "d"(field));
 	if (unlikely(error))
 		vmwrite_error(field, value);
 }
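Two smaller points in the diff are worth noting. In __invvpid()/__invept(), the old inline "ja 1f ; ud2 ; 1:" trapped on failure directly; the new code materializes the flags into a bool and calls BUG_ON(), which on x86 likewise reaches a ud2. And Paolo's tweak wraps the recoverable error checks in unlikely(), which in the kernel expands to __builtin_expect(!!(x), 0) and lets the compiler move the failure path off the hot straight-line code. A small userspace sketch of that hint; report_vmclear_fail() is a hypothetical stand-in for the printk-based error path above:

#include <stdbool.h>
#include <stdio.h>

/* Same definition as include/linux/compiler.h. */
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical stand-in: the hint tells the compiler the common case is
 * error == false, so the fprintf call is laid out off the fast path. */
static void report_vmclear_fail(bool error, const void *vmcs)
{
	if (unlikely(error))
		fprintf(stderr, "kvm: vmclear fail: %p\n", vmcs);
}

int main(void)
{
	report_vmclear_fail(false, (void *)0x1000);	/* hot path: no output */
	return 0;
}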