Commit 45c5eb67 authored by Hollis Blanchard, committed by Avi Kivity

KVM: ppc: Handle guest idle by emulating MSR[WE] writes

This reduces host CPU usage when the guest is idle. However, the guest must
set MSR[WE] in its idle loop, which Linux did not do until 2.6.26.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Jerone Young <jyoung5@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 3fe913e7
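The guest-side requirement from the commit message (setting MSR[WE] in the idle loop) boils down to a privileged MSR write like the one below. This is a hypothetical Book E sketch, not the actual 2.6.26 arch/powerpc idle code; mfmsr(), mtmsr(), and the MSR_* flags are the standard helpers from asm/reg.h.

#include <asm/reg.h>	/* mfmsr(), mtmsr(), MSR_WE, MSR_EE */

/* Hypothetical guest idle loop: MSR[WE] ("wait state enable") halts
 * the CPU until an interrupt. Under KVM the privileged MSR write
 * traps, so the host can block the idle vCPU instead of letting it
 * spin. MSR[EE] stays set so an external interrupt can wake it. */
static void guest_idle(void)
{
	mtmsr(mfmsr() | MSR_WE | MSR_EE);
	/* Execution resumes here once an interrupt is taken. */
}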
@@ -49,6 +49,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
 	{ "dec",        VCPU_STAT(dec_exits) },
 	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ NULL }
 };
...
@@ -36,13 +36,12 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-	/* XXX implement me */
-	return 0;
+	return !!(v->arch.pending_exceptions);
 }

 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	return 1;
+	return !(v->arch.msr & MSR_WE);
 }
@@ -214,6 +213,11 @@ static void kvmppc_decrementer_func(unsigned long data)
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

 	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+
+	if (waitqueue_active(&vcpu->wq)) {
+		wake_up_interruptible(&vcpu->wq);
+		vcpu->stat.halt_wakeup++;
+	}
 }

 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
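For context on the wake-up calls added above: kvm_arch_vcpu_runnable() now returns false while MSR[WE] is set, which keeps the vCPU thread parked in the generic kvm_vcpu_block(). A simplified, from-memory sketch of that helper (it lives in virt/kvm/kvm_main.c and is not part of this patch) shows why wake_up_interruptible(&vcpu->wq) is the matching wake-up primitive:

/* Simplified sketch of the generic kvm_vcpu_block(); details may
 * differ from the kernel of this era. The vCPU thread sleeps on
 * vcpu->wq until an exception is pending, a signal arrives, or the
 * arch code reports the vCPU runnable again (MSR[WE] cleared). */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);
	while (!kvm_cpu_has_interrupt(vcpu) &&
	       !signal_pending(current) &&
	       !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* woken by wake_up_interruptible() */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}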
@@ -339,6 +343,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int r;
 	sigset_t sigsaved;

+	vcpu_load(vcpu);
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -363,12 +369,20 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

+	vcpu_put(vcpu);
+
 	return r;
 }

 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+
+	if (waitqueue_active(&vcpu->wq)) {
+		wake_up_interruptible(&vcpu->wq);
+		vcpu->stat.halt_wakeup++;
+	}
+
 	return 0;
 }
...
@@ -59,6 +59,7 @@ struct kvm_vcpu_stat {
 	u32 emulated_inst_exits;
 	u32 dec_exits;
 	u32 ext_intr_exits;
+	u32 halt_wakeup;
 };

 struct tlbe {
...
@@ -77,12 +77,17 @@ static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
 	clear_bit(priority, &vcpu->arch.pending_exceptions);
 }

+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
 	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
 		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);

 	vcpu->arch.msr = new_msr;
+
+	if (vcpu->arch.msr & MSR_WE)
+		kvm_vcpu_block(vcpu);
 }

 #endif /* __POWERPC_KVM_PPC_H__ */
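The "emulating MSR[WE] writes" in the subject is the trap-and-emulate path that feeds kvmppc_set_msr(). Below is a hedged sketch of that path; emulate_mtmsr() is a hypothetical wrapper and get_rs() is assumed to extract the RS field, since the real decode lives in the PowerPC KVM instruction emulator:

/* Hypothetical sketch: a privileged mtmsr executed by the guest traps
 * to the host, the emulator extracts the source GPR, and the value is
 * installed via kvmppc_set_msr() above, which now blocks the vCPU
 * whenever the new value has MSR[WE] set. */
static void emulate_mtmsr(struct kvm_vcpu *vcpu, u32 inst)
{
	int rs = get_rs(inst);	/* source register field of the opcode */

	kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
}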