提交 03d25c5b 编写于 作者: A Alexander Graf

KVM: PPC: Use same kvmppc_prepare_to_enter code for booke and book3s_pr

We need to do the same things when preparing to enter a guest for booke and
book3s_pr cores. Fold the generic code into a generic function that both call.
Signed-off-by: Alexander Graf <agraf@suse.de>
上级 2d8185d4
...@@ -112,6 +112,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ...@@ -112,6 +112,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
ulong val); ulong val);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
ulong *val); ulong *val);
extern void kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void); extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void); extern void kvmppc_booke_exit(void);
...@@ -150,6 +151,8 @@ extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, ...@@ -150,6 +151,8 @@ extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
extern int kvmppc_bookehv_init(void); extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void); extern void kvmppc_bookehv_exit(void);
extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
/* /*
* Cuts out inst bits with ordering according to spec. * Cuts out inst bits with ordering according to spec.
* That means the leftmost bit is zero. All given bits are included. * That means the leftmost bit is zero. All given bits are included.
......
...@@ -88,6 +88,10 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) ...@@ -88,6 +88,10 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
kvmppc_giveup_ext(vcpu, MSR_VSX); kvmppc_giveup_ext(vcpu, MSR_VSX);
} }
/*
 * kvmppc_core_check_requests - per-core hook invoked from
 * kvmppc_prepare_to_enter() when vcpu->requests is non-zero.
 * Intentionally a no-op for book3s_pr: this core has no pending-request
 * work to perform before guest entry (contrast with the booke variant,
 * which handles KVM_REQ_PENDING_TIMER here).
 */
void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{ {
ulong smsr = vcpu->arch.shared->msr; ulong smsr = vcpu->arch.shared->msr;
...@@ -815,19 +819,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -815,19 +819,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* again due to a host external interrupt. * again due to a host external interrupt.
*/ */
__hard_irq_disable(); __hard_irq_disable();
if (signal_pending(current)) { if (kvmppc_prepare_to_enter(vcpu)) {
__hard_irq_enable();
#ifdef EXIT_DEBUG
printk(KERN_EMERG "KVM: Going back to host\n");
#endif
vcpu->stat.signal_exits++;
run->exit_reason = KVM_EXIT_INTR; run->exit_reason = KVM_EXIT_INTR;
r = -EINTR; r = -EINTR;
} else {
/* In case an interrupt came in that was triggered
* from userspace (like DEC), we need to check what
* to inject now! */
kvmppc_core_prepare_to_enter(vcpu);
} }
} }
...@@ -1029,8 +1023,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1029,8 +1023,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
goto out; goto out;
} }
kvmppc_core_prepare_to_enter(vcpu);
/* /*
* Interrupts could be timers for the guest which we have to inject * Interrupts could be timers for the guest which we have to inject
* again, so let's postpone them until we're in the guest and if we * again, so let's postpone them until we're in the guest and if we
...@@ -1038,9 +1030,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1038,9 +1030,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* a host external interrupt. * a host external interrupt.
*/ */
__hard_irq_disable(); __hard_irq_disable();
if (kvmppc_prepare_to_enter(vcpu)) {
/* No need to go into the guest when all we do is going out */
if (signal_pending(current)) {
__hard_irq_enable(); __hard_irq_enable();
kvm_run->exit_reason = KVM_EXIT_INTR; kvm_run->exit_reason = KVM_EXIT_INTR;
ret = -EINTR; ret = -EINTR;
......
...@@ -455,10 +455,8 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -455,10 +455,8 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
return r; return r;
} }
static void kvmppc_check_requests(struct kvm_vcpu *vcpu) void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{ {
trace_kvm_check_requests(vcpu);
if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
update_timer_ints(vcpu); update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
...@@ -467,60 +465,6 @@ static void kvmppc_check_requests(struct kvm_vcpu *vcpu) ...@@ -467,60 +465,6 @@ static void kvmppc_check_requests(struct kvm_vcpu *vcpu)
#endif #endif
} }
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
*
* Returns !0 if we have to bail out to the host instead of entering
* the guest: either a signal is pending, or vcpu->mode was changed to
* EXITING_GUEST_MODE (NOTE(review): presumably set by a remote kick —
* confirm against kvm_vcpu_kick()).
*/
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
int r = 0;
WARN_ON_ONCE(!irqs_disabled());
while (true) {
if (need_resched()) {
/* Briefly re-enable interrupts so we can be scheduled, then retry */
local_irq_enable();
cond_resched();
local_irq_disable();
continue;
}
if (signal_pending(current)) {
/* Bail out so the caller can return to userspace */
r = 1;
break;
}
smp_mb();
if (vcpu->requests) {
/* Make sure we process requests preemptibly */
local_irq_enable();
kvmppc_check_requests(vcpu);
local_irq_disable();
continue;
}
if (kvmppc_core_prepare_to_enter(vcpu)) {
/* interrupts got enabled in between, so we
are back at square 1 */
continue;
}
if (vcpu->mode == EXITING_GUEST_MODE) {
/* Someone asked us to leave the guest — go back to the host */
r = 1;
break;
}
/* Going into guest context! Yay! */
vcpu->mode = IN_GUEST_MODE;
smp_wmb();
break;
}
return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{ {
int ret; int ret;
......
...@@ -47,6 +47,63 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) ...@@ -47,6 +47,63 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1; return 1;
} }
#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
*
* Returns !0 if we have to bail out to the host instead of entering
* the guest: either a signal is pending, or vcpu->mode was changed to
* EXITING_GUEST_MODE (NOTE(review): presumably set by a remote kick —
* confirm against kvm_vcpu_kick()).
*/
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
int r = 0;
WARN_ON_ONCE(!irqs_disabled());
while (true) {
if (need_resched()) {
/* Briefly re-enable interrupts so we can be scheduled, then retry */
local_irq_enable();
cond_resched();
local_irq_disable();
continue;
}
if (signal_pending(current)) {
/* Bail out so the caller can return to userspace */
r = 1;
break;
}
smp_mb();
if (vcpu->requests) {
/* Make sure we process requests preemptibly */
local_irq_enable();
trace_kvm_check_requests(vcpu);
kvmppc_core_check_requests(vcpu);
local_irq_disable();
continue;
}
if (kvmppc_core_prepare_to_enter(vcpu)) {
/* interrupts got enabled in between, so we
are back at square 1 */
continue;
}
if (vcpu->mode == EXITING_GUEST_MODE) {
/* Someone asked us to leave the guest — go back to the host */
r = 1;
break;
}
/* Going into guest context! Yay! */
vcpu->mode = IN_GUEST_MODE;
smp_wmb();
break;
}
return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{ {
int nr = kvmppc_get_gpr(vcpu, 11); int nr = kvmppc_get_gpr(vcpu, 11);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册