Commit e2536742 authored by Liran Alon, committed by Paolo Bonzini

KVM: VMX: Change vmcs12_{read,write}_any() to receive vmcs12 as parameter

No functionality change.
This is done in preparation for VMCS shadowing emulation.
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 392b2f25
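The patch below changes vmcs12_read_any()/vmcs12_write_any() to take a struct vmcs12 pointer directly instead of deriving it from a struct kvm_vcpu on every call; callers resolve the pointer once (e.g. via get_vmcs12(vcpu)) and pass it in. The sketch below is a minimal, userspace-only illustration of that calling convention, assuming a toy two-field vmcs12 layout and made-up field encodings and widths; it is not the kernel code, just a hedged stand-in to show how the new signatures are used.

/*
 * Illustration only: a simplified vmcs12 layout and offset table.
 * Field encodings (0x681e, 0x4402) and widths are assumptions for
 * the example; the kernel's real tables are far larger.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct vmcs12 {
	uint64_t guest_rip;
	uint32_t exit_reason;
};

/* Map a fake VMCS field encoding to an offset inside struct vmcs12. */
static short vmcs_field_to_offset(unsigned long field)
{
	switch (field) {
	case 0x681e: return offsetof(struct vmcs12, guest_rip);
	case 0x4402: return offsetof(struct vmcs12, exit_reason);
	default:     return -1;
	}
}

/* New convention: the accessor receives the vmcs12 pointer directly. */
static int vmcs12_read_any(struct vmcs12 *vmcs12,
			   unsigned long field, uint64_t *ret)
{
	short offset = vmcs_field_to_offset(field);
	char *p;

	if (offset < 0)
		return offset;

	p = (char *)vmcs12 + offset;
	/* The real code switches on the field width; assume u32 vs u64 here. */
	*ret = (field == 0x4402) ? *(uint32_t *)p : *(uint64_t *)p;
	return 0;
}

static int vmcs12_write_any(struct vmcs12 *vmcs12,
			    unsigned long field, uint64_t field_value)
{
	short offset = vmcs_field_to_offset(field);
	char *p;

	if (offset < 0)
		return offset;

	p = (char *)vmcs12 + offset;
	if (field == 0x4402)
		*(uint32_t *)p = (uint32_t)field_value;
	else
		*(uint64_t *)p = field_value;
	return 0;
}

int main(void)
{
	struct vmcs12 v = { 0 };
	uint64_t rip;

	/* Callers pass the vmcs12 they already hold, not a vcpu. */
	vmcs12_write_any(&v, 0x681e, 0xffffffff81000000ull);
	vmcs12_read_any(&v, 0x681e, &rip);
	printf("guest_rip = 0x%llx\n", (unsigned long long)rip);
	return 0;
}

The point of the change is that routines which already operate on a specific vmcs12 (for example, a shadow VMCS copy loop) no longer have to go through the vcpu on every field access, which is what makes the later VMCS shadowing emulation work possible.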
@@ -8159,7 +8159,7 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
  * 64-bit fields are to be returned).
  */
-static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
+static inline int vmcs12_read_any(struct vmcs12 *vmcs12,
 				  unsigned long field, u64 *ret)
 {
 	short offset = vmcs_field_to_offset(field);
@@ -8168,7 +8168,7 @@ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
 	if (offset < 0)
 		return offset;
 
-	p = ((char *)(get_vmcs12(vcpu))) + offset;
+	p = (char *)vmcs12 + offset;
 
 	switch (vmcs_field_width(field)) {
 	case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
@@ -8190,10 +8190,10 @@ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
 }
 
 
-static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
+static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
 				   unsigned long field, u64 field_value){
 	short offset = vmcs_field_to_offset(field);
-	char *p = ((char *) get_vmcs12(vcpu)) + offset;
+	char *p = (char *)vmcs12 + offset;
 
 	if (offset < 0)
 		return offset;
@@ -8246,7 +8246,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 		for (i = 0; i < max_fields[q]; i++) {
 			field = fields[q][i];
 			field_value = __vmcs_readl(field);
-			vmcs12_write_any(&vmx->vcpu, field, field_value);
+			vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
 		}
 		/*
 		 * Skip the VM-exit information fields if they are read-only.
@@ -8281,7 +8281,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	for (q = 0; q < ARRAY_SIZE(fields); q++) {
 		for (i = 0; i < max_fields[q]; i++) {
 			field = fields[q][i];
-			vmcs12_read_any(&vmx->vcpu, field, &field_value);
+			vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
 			__vmcs_writel(field, field_value);
 		}
 	}
@@ -8321,7 +8321,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 	/* Read the field, zero-extended to a u64 field_value */
-	if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
+	if (vmcs12_read_any(get_vmcs12(vcpu), field, &field_value) < 0) {
 		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 		return kvm_skip_emulated_instruction(vcpu);
 	}
@@ -8397,7 +8397,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	if (vmcs12_write_any(vcpu, field, field_value) < 0) {
+	if (vmcs12_write_any(get_vmcs12(vcpu), field, field_value) < 0) {
 		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 		return kvm_skip_emulated_instruction(vcpu);
 	}
@@ -10971,11 +10971,12 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 				       unsigned long count_field,
 				       unsigned long addr_field)
 {
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	int maxphyaddr;
 	u64 count, addr;
 
-	if (vmcs12_read_any(vcpu, count_field, &count) ||
-	    vmcs12_read_any(vcpu, addr_field, &addr)) {
+	if (vmcs12_read_any(vmcs12, count_field, &count) ||
+	    vmcs12_read_any(vmcs12, addr_field, &addr)) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
......