提交 72875d8a 编写于 作者: R Radim Krčmář 提交者: Paolo Bonzini

KVM: add kvm_{test,clear}_request to replace {test,clear}_bit

Users were expected to use kvm_check_request() for testing and clearing,
but requests have expanded in use since then, and some users want to
only test, or to do a faster clear.

Make sure that requests are not directly accessed with bit operations.
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 cf9bdd35
...@@ -982,7 +982,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) ...@@ -982,7 +982,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
* check if any I/O interrupts are pending. * check if any I/O interrupts are pending.
*/ */
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
} }
} }
......
...@@ -349,7 +349,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) ...@@ -349,7 +349,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) { if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) { if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.halt_wakeup++; vcpu->stat.halt_wakeup++;
/* Unset POW bit after we woke up */ /* Unset POW bit after we woke up */
......
...@@ -344,7 +344,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) ...@@ -344,7 +344,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_CEDE: case H_CEDE:
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.halt_wakeup++; vcpu->stat.halt_wakeup++;
return EMULATE_DONE; return EMULATE_DONE;
case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_LOAD:
......
...@@ -584,7 +584,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu) ...@@ -584,7 +584,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
* userspace, so clear the KVM_REQ_WATCHDOG request. * userspace, so clear the KVM_REQ_WATCHDOG request.
*/ */
if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS)) if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests); kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
nr_jiffies = watchdog_next_timeout(vcpu); nr_jiffies = watchdog_next_timeout(vcpu);
...@@ -695,7 +695,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -695,7 +695,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) { if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable(); local_irq_enable();
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
hard_irq_disable(); hard_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
......
...@@ -233,7 +233,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) ...@@ -233,7 +233,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
case EV_HCALL_TOKEN(EV_IDLE): case EV_HCALL_TOKEN(EV_IDLE):
r = EV_SUCCESS; r = EV_SUCCESS;
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
break; break;
default: default:
r = EV_UNIMPLEMENTED; r = EV_UNIMPLEMENTED;
......
...@@ -2496,7 +2496,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -2496,7 +2496,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
} }
/* nothing to do, just clear the request */ /* nothing to do, just clear the request */
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
return 0; return 0;
} }
......
...@@ -6299,7 +6299,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) ...@@ -6299,7 +6299,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (intr_window_requested && vmx_interrupt_allowed(vcpu)) if (intr_window_requested && vmx_interrupt_allowed(vcpu))
return handle_interrupt_window(&vmx->vcpu); return handle_interrupt_window(&vmx->vcpu);
if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1; return 1;
err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
......
...@@ -1753,7 +1753,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) ...@@ -1753,7 +1753,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
/* guest entries allowed */ /* guest entries allowed */
kvm_for_each_vcpu(i, vcpu, kvm) kvm_for_each_vcpu(i, vcpu, kvm)
clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
spin_unlock(&ka->pvclock_gtod_sync_lock); spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif #endif
...@@ -7041,7 +7041,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) ...@@ -7041,7 +7041,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0) if (r <= 0)
break; break;
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
if (kvm_cpu_has_pending_timer(vcpu)) if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu); kvm_inject_pending_timer_irqs(vcpu);
...@@ -7169,7 +7169,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -7169,7 +7169,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
kvm_apic_accept_events(vcpu); kvm_apic_accept_events(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN; r = -EAGAIN;
goto out; goto out;
} }
...@@ -8382,7 +8382,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) ...@@ -8382,7 +8382,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (atomic_read(&vcpu->arch.nmi_queued)) if (atomic_read(&vcpu->arch.nmi_queued))
return true; return true;
if (test_bit(KVM_REQ_SMI, &vcpu->requests)) if (kvm_test_request(KVM_REQ_SMI, vcpu))
return true; return true;
if (kvm_arch_interrupt_allowed(vcpu) && if (kvm_arch_interrupt_allowed(vcpu) &&
......
...@@ -1079,10 +1079,20 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) ...@@ -1079,10 +1079,20 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req, &vcpu->requests); set_bit(req, &vcpu->requests);
} }
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
return test_bit(req, &vcpu->requests);
}
static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{ {
if (test_bit(req, &vcpu->requests)) {
clear_bit(req, &vcpu->requests); clear_bit(req, &vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
if (kvm_test_request(req, vcpu)) {
kvm_clear_request(req, vcpu);
/* /*
* Ensure the rest of the request is visible to kvm_check_request's * Ensure the rest of the request is visible to kvm_check_request's
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册