Commit df6909e5 authored by Paul Mackerras, committed by Avi Kivity

KVM: PPC: Move guest enter/exit down into subarch-specific code

Instead of doing the kvm_guest_enter/exit() and local_irq_dis/enable()
calls in powerpc.c, this moves them down into the subarch-specific
book3s_pr.c and booke.c.  This eliminates an extra local_irq_enable()
call in book3s_pr.c, and will be needed for when we do SMT4 guest
support in the book3s hypervisor mode code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent f9e0554d
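
For orientation before the hunks: after this patch the generic kvm_arch_vcpu_ioctl_run() in powerpc.c simply calls a new subarch hook, kvmppc_vcpu_run(), and each subarch wraps the low-level __kvmppc_vcpu_run() itself. The sketch below is only a condensed restatement of the booke.c hunk further down, not additional code; the book3s_pr.c variant is now entered with interrupts still enabled, so the old local_irq_enable() at the top of its run path (marked XXX in the removed lines) is no longer needed.

/*
 * Condensed sketch of the new per-subarch wrapper, taken from the booke.c
 * hunk in this diff.  The generic code now does nothing more than
 * "r = kvmppc_vcpu_run(run, vcpu);".
 */
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;

        local_irq_disable();                    /* interrupts off around guest entry */
        kvm_guest_enter();                      /* mark this CPU as running guest code */
        ret = __kvmppc_vcpu_run(kvm_run, vcpu); /* low-level entry; the Book3S symbol is renamed from __kvmppc_vcpu_entry */
        kvm_guest_exit();
        local_irq_enable();

        return ret;
}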
@@ -42,6 +42,7 @@ enum emulation_result {
         EMULATE_AGAIN,        /* something went wrong. go again */
 };
 
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern char kvmppc_handlers_start[];
 extern unsigned long kvmppc_handler_len;
...
@@ -85,7 +85,7 @@
  * r3: kvm_run pointer
  * r4: vcpu pointer
  */
-_GLOBAL(__kvmppc_vcpu_entry)
+_GLOBAL(__kvmppc_vcpu_run)
 
 kvm_start_entry:
         /* Write correct stack frame */
...
@@ -891,8 +891,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
         vfree(vcpu_book3s);
 }
 
-extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
         int ret;
         double fpr[32][TS_FPRWIDTH];
@@ -944,14 +943,15 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         /* Remember the MSR with disabled extensions */
         ext_msr = current->thread.regs->msr;
 
-        /* XXX we get called with irq disabled - change that! */
-        local_irq_enable();
-
         /* Preload FPU if it's enabled */
         if (vcpu->arch.shared->msr & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-        ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
+        kvm_guest_enter();
+
+        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+        kvm_guest_exit();
 
         local_irq_disable();
...
@@ -312,6 +312,19 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
                 vcpu->arch.shared->int_pending = 0;
 }
 
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+        int ret;
+
+        local_irq_disable();
+        kvm_guest_enter();
+        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+        kvm_guest_exit();
+        local_irq_enable();
+
+        return ret;
+}
+
 /**
  * kvmppc_handle_exit
  *
...
@@ -500,11 +500,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         kvmppc_core_deliver_interrupts(vcpu);
 
-        local_irq_disable();
-        kvm_guest_enter();
-        r = __kvmppc_vcpu_run(run, vcpu);
-        kvm_guest_exit();
-        local_irq_enable();
+        r = kvmppc_vcpu_run(run, vcpu);
 
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
...