Commit 45def77e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: update %rip after emulating IO

Most (all?) x86 platforms provide a port IO based reset mechanism, e.g.
OUT 92h or CF9h.  Userspace may emulate said mechanism, i.e. reset a
vCPU in response to KVM_EXIT_IO, without explicitly announcing to KVM
that it is doing a reset, e.g. Qemu jams vCPU state and resumes running.
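
For illustration, a minimal sketch of such an unannounced userspace reset against the raw KVM ioctl ABI. The KVM_EXIT_IO layout and the KVM_GET_SREGS/KVM_SET_SREGS/KVM_SET_REGS ioctls are the real interface; maybe_handle_pio_reset() and the CF9h chipset decoding are invented for this sketch:

	/*
	 * Illustration only: a VMM reacting to a guest write to port CF9h
	 * by jamming architectural reset state into the vCPU, then simply
	 * resuming with KVM_RUN, never telling KVM a reset happened.
	 */
	#include <linux/kvm.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	static void maybe_handle_pio_reset(int vcpu_fd, struct kvm_run *run)
	{
		struct kvm_regs regs = { .rip = 0xfff0, .rflags = 0x2 };
		struct kvm_sregs sregs;
		uint8_t val;

		if (run->exit_reason != KVM_EXIT_IO ||
		    run->io.direction != KVM_EXIT_IO_OUT || run->io.port != 0xcf9)
			return;

		/* The written byte lives at data_offset in the shared kvm_run page. */
		val = *((uint8_t *)run + run->io.data_offset);
		if (!(val & 0x04))	/* RST_CPU clear: not a reset request */
			return;

		/* Jam (approximate) x86 reset state: CS 0xf000, base 0xffff0000. */
		ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
		sregs.cs.selector = 0xf000;
		sregs.cs.base = 0xffff0000;
		ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
		ioctl(vcpu_fd, KVM_SET_REGS, &regs);
	}

KVM never learns that a reset happened; it can only observe, if it looks, that %rip and friends changed across the exit.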

To avoid corrupting %rip after such a reset, commit 0967b7bf ("KVM:
Skip pio instruction when it is emulated, not executed") changed the
behavior of PIO handlers, i.e. today's "fast" PIO handling, to skip the
instruction prior to exiting to userspace.  Full emulation doesn't need
such tricks because re-emulating the instruction will naturally handle
%rip being changed to point at the reset vector.

Updating %rip prior to exiting to userspace has several drawbacks:

  - Userspace sees the wrong %rip on the exit, e.g. if PIO emulation
    fails it will likely yell about the wrong address.
  - Single-step exits to userspace are effectively dropped as
    KVM_EXIT_DEBUG is overwritten with KVM_EXIT_IO.
  - Behavior of PIO emulation is different depending on whether it
    goes down the fast path or the slow path.

Rather than skip the PIO instruction before exiting to userspace,
snapshot the linear %rip and cancel PIO completion if the current
value does not match the snapshot.  For a 64-bit vCPU, i.e. the most
common scenario, the snapshot and comparison has negligible overhead
as VMCS.GUEST_RIP will be cached regardless, i.e. there is no extra
VMREAD in this case.
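
For reference, the snapshot and comparison use KVM's existing linear-%rip helpers in arch/x86/kvm/x86.c, which at the time of this commit read approximately as follows (a sketch from the contemporary tree; kvm_rip_read() returns the cached VMCS.GUEST_RIP on VMX, hence no extra VMREAD):

	unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
	{
		/* In 64-bit mode CS.base is architecturally 0: linear rip == rip. */
		if (is_64_bit_mode(vcpu))
			return kvm_rip_read(vcpu);
		/* Otherwise fold in CS.base and truncate to 32 bits. */
		return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
			     kvm_rip_read(vcpu));
	}

	bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
	{
		return kvm_get_linear_rip(vcpu) == linear_rip;
	}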

All other alternatives to snapshotting the linear %rip that don't
rely on an explicit reset announcement suffer from one corner case
or another.  For example, canceling PIO completion on any write to
%rip fails if userspace does a save/restore of %rip, and attempting to
avoid that issue by canceling PIO only if %rip changed then fails if PIO
collides with the reset %rip.  Attempting to zero in on the exact reset
vector won't work for APs, which means adding more hooks such as the
vCPU's MP_STATE, and so on and so forth.

Checking for a linear %rip match technically suffers from corner cases,
e.g. userspace could theoretically rewrite the underlying code page and
expect a different instruction to execute, or the guest hardcodes a PIO
reset at 0xfffffff0, but those are far, far outside of what can be
considered normal operation.

Fixes: 432baf60 ("KVM: VMX: use kvm_fast_pio_in for handling IN I/O")
Cc: <stable@vger.kernel.org>
Reported-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 013cc6eb
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -350,6 +350,7 @@ struct kvm_mmu_page {
 };
 
 struct kvm_pio_request {
+	unsigned long linear_rip;
 	unsigned long count;
 	int in;
 	int port;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6535,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
+static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pio.count = 0;
+
+	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
+		return 1;
+
+	return kvm_skip_emulated_instruction(vcpu);
+}
+
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
 			    unsigned short port)
 {
 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
 					    size, port, &val, 1);
-	/* do not return to emulator after return from userspace */
-	vcpu->arch.pio.count = 0;
+
+	if (!ret) {
+		vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
+	}
 	return ret;
 }
 
@@ -6553,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
 	/* We should only ever be called with arch.pio.count equal to 1 */
 	BUG_ON(vcpu->arch.pio.count != 1);
 
+	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
+		vcpu->arch.pio.count = 0;
+		return 1;
+	}
+
 	/* For size less than 4 we merge, else we zero extend */
 	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
 					: 0;
@@ -6565,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
 				 vcpu->arch.pio.port, &val, 1);
 	kvm_register_write(vcpu, VCPU_REGS_RAX, val);
 
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@@ -6584,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
 		return ret;
 	}
 
+	vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
 	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
 
 	return 0;
@@ -6591,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
 
 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
 {
-	int ret = kvm_skip_emulated_instruction(vcpu);
+	int ret;
 
-	/*
-	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-	 * KVM_EXIT_DEBUG here.
-	 */
 	if (in)
-		return kvm_fast_pio_in(vcpu, size, port) && ret;
+		ret = kvm_fast_pio_in(vcpu, size, port);
 	else
-		return kvm_fast_pio_out(vcpu, size, port) && ret;
+		ret = kvm_fast_pio_out(vcpu, size, port);
+	return ret && kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio);