提交 c62e2e94 编写于 作者: Babu Moger 提交者: Paolo Bonzini

KVM: SVM: Modify 64 bit intercept field to two 32 bit vectors

Convert all the intercepts to one array of 32 bit vectors in
vmcb_control_area. This makes it easy for future intercept vector
additions. Also update trace functions.
Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Message-Id: <159985250813.11252.5736581193881040525.stgit@bmoger-ubuntu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 9780d51d
...@@ -14,6 +14,8 @@ enum intercept_words { ...@@ -14,6 +14,8 @@ enum intercept_words {
INTERCEPT_CR = 0, INTERCEPT_CR = 0,
INTERCEPT_DR, INTERCEPT_DR,
INTERCEPT_EXCEPTION, INTERCEPT_EXCEPTION,
INTERCEPT_WORD3,
INTERCEPT_WORD4,
MAX_INTERCEPT, MAX_INTERCEPT,
}; };
...@@ -46,10 +48,8 @@ enum { ...@@ -46,10 +48,8 @@ enum {
INTERCEPT_DR7_WRITE, INTERCEPT_DR7_WRITE,
/* Byte offset 008h (word 2) */ /* Byte offset 008h (word 2) */
INTERCEPT_EXCEPTION_OFFSET = 64, INTERCEPT_EXCEPTION_OFFSET = 64,
}; /* Byte offset 00Ch (word 3) */
INTERCEPT_INTR = 96,
enum {
INTERCEPT_INTR,
INTERCEPT_NMI, INTERCEPT_NMI,
INTERCEPT_SMI, INTERCEPT_SMI,
INTERCEPT_INIT, INTERCEPT_INIT,
...@@ -81,7 +81,8 @@ enum { ...@@ -81,7 +81,8 @@ enum {
INTERCEPT_TASK_SWITCH, INTERCEPT_TASK_SWITCH,
INTERCEPT_FERR_FREEZE, INTERCEPT_FERR_FREEZE,
INTERCEPT_SHUTDOWN, INTERCEPT_SHUTDOWN,
INTERCEPT_VMRUN, /* Byte offset 010h (word 4) */
INTERCEPT_VMRUN = 128,
INTERCEPT_VMMCALL, INTERCEPT_VMMCALL,
INTERCEPT_VMLOAD, INTERCEPT_VMLOAD,
INTERCEPT_VMSAVE, INTERCEPT_VMSAVE,
...@@ -101,8 +102,7 @@ enum { ...@@ -101,8 +102,7 @@ enum {
struct __attribute__ ((__packed__)) vmcb_control_area { struct __attribute__ ((__packed__)) vmcb_control_area {
u32 intercepts[MAX_INTERCEPT]; u32 intercepts[MAX_INTERCEPT];
u64 intercept; u32 reserved_1[15 - MAX_INTERCEPT];
u8 reserved_1[40];
u16 pause_filter_thresh; u16 pause_filter_thresh;
u16 pause_filter_count; u16 pause_filter_count;
u64 iopm_base_pa; u64 iopm_base_pa;
......
...@@ -112,8 +112,6 @@ void recalc_intercepts(struct vcpu_svm *svm) ...@@ -112,8 +112,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
for (i = 0; i < MAX_INTERCEPT; i++) for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] = h->intercepts[i]; c->intercepts[i] = h->intercepts[i];
c->intercept = h->intercept;
if (g->int_ctl & V_INTR_MASKING_MASK) { if (g->int_ctl & V_INTR_MASKING_MASK) {
/* We only want the cr8 intercept bits of L1 */ /* We only want the cr8 intercept bits of L1 */
vmcb_clr_intercept(c, INTERCEPT_CR8_READ); vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
...@@ -124,16 +122,14 @@ void recalc_intercepts(struct vcpu_svm *svm) ...@@ -124,16 +122,14 @@ void recalc_intercepts(struct vcpu_svm *svm)
* affect any interrupt we may want to inject; therefore, * affect any interrupt we may want to inject; therefore,
* interrupt window vmexits are irrelevant to L0. * interrupt window vmexits are irrelevant to L0.
*/ */
c->intercept &= ~(1ULL << INTERCEPT_VINTR); vmcb_clr_intercept(c, INTERCEPT_VINTR);
} }
/* We don't want to see VMMCALLs from a nested guest */ /* We don't want to see VMMCALLs from a nested guest */
c->intercept &= ~(1ULL << INTERCEPT_VMMCALL); vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
for (i = 0; i < MAX_INTERCEPT; i++) for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i]; c->intercepts[i] |= g->intercepts[i];
c->intercept |= g->intercept;
} }
static void copy_vmcb_control_area(struct vmcb_control_area *dst, static void copy_vmcb_control_area(struct vmcb_control_area *dst,
...@@ -144,7 +140,6 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst, ...@@ -144,7 +140,6 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
for (i = 0; i < MAX_INTERCEPT; i++) for (i = 0; i < MAX_INTERCEPT; i++)
dst->intercepts[i] = from->intercepts[i]; dst->intercepts[i] = from->intercepts[i];
dst->intercept = from->intercept;
dst->iopm_base_pa = from->iopm_base_pa; dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa; dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset; dst->tsc_offset = from->tsc_offset;
...@@ -177,7 +172,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) ...@@ -177,7 +172,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
*/ */
int i; int i;
if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT))) if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
return true; return true;
for (i = 0; i < MSRPM_OFFSETS; i++) { for (i = 0; i < MSRPM_OFFSETS; i++) {
...@@ -203,7 +198,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) ...@@ -203,7 +198,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
static bool nested_vmcb_check_controls(struct vmcb_control_area *control) static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{ {
if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0) if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
return false; return false;
if (control->asid == 0) if (control->asid == 0)
...@@ -489,7 +484,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm) ...@@ -489,7 +484,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff, trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
vmcb12->control.intercepts[INTERCEPT_CR] >> 16, vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
vmcb12->control.intercepts[INTERCEPT_EXCEPTION], vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
vmcb12->control.intercept); vmcb12->control.intercepts[INTERCEPT_WORD3],
vmcb12->control.intercepts[INTERCEPT_WORD4]);
/* Clear internal status */ /* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu); kvm_clear_exception_queue(&svm->vcpu);
...@@ -708,7 +704,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) ...@@ -708,7 +704,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
u32 offset, msr, value; u32 offset, msr, value;
int write, mask; int write, mask;
if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT))) if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
return NESTED_EXIT_HOST; return NESTED_EXIT_HOST;
msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
...@@ -735,7 +731,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm) ...@@ -735,7 +731,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
u8 start_bit; u8 start_bit;
u64 gpa; u64 gpa;
if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT))) if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
return NESTED_EXIT_HOST; return NESTED_EXIT_HOST;
port = svm->vmcb->control.exit_info_1 >> 16; port = svm->vmcb->control.exit_info_1 >> 16;
...@@ -789,8 +785,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm) ...@@ -789,8 +785,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
break; break;
} }
default: { default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
if (svm->nested.ctl.intercept & exit_bits)
vmexit = NESTED_EXIT_DONE; vmexit = NESTED_EXIT_DONE;
} }
} }
...@@ -898,7 +893,7 @@ static void nested_svm_intr(struct vcpu_svm *svm) ...@@ -898,7 +893,7 @@ static void nested_svm_intr(struct vcpu_svm *svm)
static inline bool nested_exit_on_init(struct vcpu_svm *svm) static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{ {
return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT)); return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
} }
static void nested_svm_init(struct vcpu_svm *svm) static void nested_svm_init(struct vcpu_svm *svm)
......
...@@ -2217,12 +2217,9 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, ...@@ -2217,12 +2217,9 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
{ {
unsigned long cr0 = svm->vcpu.arch.cr0; unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false; bool ret = false;
u64 intercept;
intercept = svm->nested.ctl.intercept;
if (!is_guest_mode(&svm->vcpu) || if (!is_guest_mode(&svm->vcpu) ||
(!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))) (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
return false; return false;
cr0 &= ~SVM_CR0_SELECTIVE_MASK; cr0 &= ~SVM_CR0_SELECTIVE_MASK;
...@@ -2817,7 +2814,9 @@ static void dump_vmcb(struct kvm_vcpu *vcpu) ...@@ -2817,7 +2814,9 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
pr_err("%-20s%016llx\n", "intercepts:", control->intercept); pr_err("%-20s%08x %08x\n", "intercepts:",
control->intercepts[INTERCEPT_WORD3],
control->intercepts[INTERCEPT_WORD4]);
pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
pr_err("%-20s%d\n", "pause filter threshold:", pr_err("%-20s%d\n", "pause filter threshold:",
control->pause_filter_thresh); control->pause_filter_thresh);
...@@ -3734,7 +3733,6 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, ...@@ -3734,7 +3733,6 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
break; break;
case SVM_EXIT_WRITE_CR0: { case SVM_EXIT_WRITE_CR0: {
unsigned long cr0, val; unsigned long cr0, val;
u64 intercept;
if (info->intercept == x86_intercept_cr_write) if (info->intercept == x86_intercept_cr_write)
icpt_info.exit_code += info->modrm_reg; icpt_info.exit_code += info->modrm_reg;
...@@ -3743,9 +3741,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, ...@@ -3743,9 +3741,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
info->intercept == x86_intercept_clts) info->intercept == x86_intercept_clts)
break; break;
intercept = svm->nested.ctl.intercept; if (!(vmcb_is_intercept(&svm->nested.ctl,
INTERCEPT_SELECTIVE_CR0)))
if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
break; break;
cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
...@@ -4013,7 +4010,7 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) ...@@ -4013,7 +4010,7 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
* if an INIT signal is pending. * if an INIT signal is pending.
*/ */
return !gif_set(svm) || return !gif_set(svm) ||
(svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT)); (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
} }
static void svm_vm_destroy(struct kvm *kvm) static void svm_vm_destroy(struct kvm *kvm)
......
...@@ -313,7 +313,7 @@ static inline void svm_set_intercept(struct vcpu_svm *svm, int bit) ...@@ -313,7 +313,7 @@ static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{ {
struct vmcb *vmcb = get_host_vmcb(svm); struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept |= (1ULL << bit); vmcb_set_intercept(&vmcb->control, bit);
recalc_intercepts(svm); recalc_intercepts(svm);
} }
...@@ -322,14 +322,14 @@ static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit) ...@@ -322,14 +322,14 @@ static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{ {
struct vmcb *vmcb = get_host_vmcb(svm); struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept &= ~(1ULL << bit); vmcb_clr_intercept(&vmcb->control, bit);
recalc_intercepts(svm); recalc_intercepts(svm);
} }
static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit) static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{ {
return (svm->vmcb->control.intercept & (1ULL << bit)) != 0; return vmcb_is_intercept(&svm->vmcb->control, bit);
} }
static inline bool vgif_enabled(struct vcpu_svm *svm) static inline bool vgif_enabled(struct vcpu_svm *svm)
...@@ -393,17 +393,17 @@ static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu) ...@@ -393,17 +393,17 @@ static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
static inline bool nested_exit_on_smi(struct vcpu_svm *svm) static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{ {
return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_SMI)); return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
} }
static inline bool nested_exit_on_intr(struct vcpu_svm *svm) static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{ {
return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INTR)); return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
} }
static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{ {
return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI)); return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
} }
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
......
...@@ -544,26 +544,30 @@ TRACE_EVENT(kvm_nested_vmrun, ...@@ -544,26 +544,30 @@ TRACE_EVENT(kvm_nested_vmrun,
); );
TRACE_EVENT(kvm_nested_intercepts, TRACE_EVENT(kvm_nested_intercepts,
TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u32 intercept1,
TP_ARGS(cr_read, cr_write, exceptions, intercept), __u32 intercept2),
TP_ARGS(cr_read, cr_write, exceptions, intercept1, intercept2),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( __u16, cr_read ) __field( __u16, cr_read )
__field( __u16, cr_write ) __field( __u16, cr_write )
__field( __u32, exceptions ) __field( __u32, exceptions )
__field( __u64, intercept ) __field( __u32, intercept1 )
__field( __u32, intercept2 )
), ),
TP_fast_assign( TP_fast_assign(
__entry->cr_read = cr_read; __entry->cr_read = cr_read;
__entry->cr_write = cr_write; __entry->cr_write = cr_write;
__entry->exceptions = exceptions; __entry->exceptions = exceptions;
__entry->intercept = intercept; __entry->intercept1 = intercept1;
__entry->intercept2 = intercept2;
), ),
TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
__entry->cr_read, __entry->cr_write, __entry->exceptions, "intercepts: %08x %08x",
__entry->intercept) __entry->cr_read, __entry->cr_write, __entry->exceptions,
__entry->intercept1, __entry->intercept2)
); );
/* /*
* Tracepoint for #VMEXIT while nested * Tracepoint for #VMEXIT while nested
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册