Commit 6c85f52b authored by Scott Wood, committed by Alexander Graf

kvm/ppc: IRQ disabling cleanup

Simplify the handling of lazy EE by going directly from fully-enabled
to hard-disabled.  This replaces the lazy_irq_pending() check
(including its misplaced kvm_guest_exit() call).

As suggested by Tiejun Chen, move the interrupt disabling into
kvmppc_prepare_to_enter() rather than have each caller do it.  Also
move the IRQ enabling on heavyweight exit into
kvmppc_prepare_to_enter().

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent 70713fe3
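
For orientation, here is a minimal standalone sketch (userspace stubs, not kernel code; prepare_to_enter() merely stands in for kvmppc_prepare_to_enter()) of the calling convention after this change: the function is entered with interrupts fully enabled, hard-disables them itself, and re-enables them itself on a heavyweight (<= 0) exit, so callers no longer bracket the call with local_irq_disable()/local_irq_enable().

/*
 * Sketch only: the helpers below are stubs standing in for the kernel
 * primitives of the same name; prepare_to_enter() is a stand-in for
 * kvmppc_prepare_to_enter().
 */
#include <stdio.h>

static void hard_irq_disable(void) { puts("hard-disable interrupts"); }
static void local_irq_enable(void) { puts("enable interrupts"); }

static int prepare_to_enter(int heavyweight_exit)
{
	hard_irq_disable();             /* moved here from every caller */
	/* ... resched / signal / request checks elided ... */
	if (heavyweight_exit) {
		local_irq_enable();     /* re-enabling also moved here */
		return -1;              /* <= 0: return to host */
	}
	return 1;                       /* > 0: enter guest, IRQs hard-disabled */
}

int main(void)
{
	/* New caller pattern: no IRQ bracketing around the call. */
	if (prepare_to_enter(0) <= 0)
		return 0;               /* interrupts were already re-enabled */
	/* interrupts now hard-disabled; guest entry would happen here */
	local_irq_enable();
	return 0;
}

The hunks below make exactly this change at each call site and inside kvmppc_prepare_to_enter() itself.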
@@ -456,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
 	trace_hardirqs_on();
 
 #ifdef CONFIG_PPC64
+	/*
+	 * To avoid races, the caller must have gone directly from having
+	 * interrupts fully-enabled to hard-disabled.
+	 */
+	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
 	local_paca->soft_enabled = 1;
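The WARN_ON added above relies on the PPC64 lazy-EE bookkeeping: with interrupts fully enabled nothing is pending in the paca's irq_happened, and hard-disabling directly from that state records only the hard-disable itself, so there is nothing left to replay before entering the guest. A toy userspace model of that invariant (the field names mirror the paca, but the flag value and helper are illustrative only):

#include <assert.h>
#include <stdint.h>

#define PACA_IRQ_HARD_DIS 0x01          /* illustrative value only */

struct toy_paca {
	uint8_t soft_enabled;           /* lazy-EE soft-enable flag */
	uint8_t irq_happened;           /* events latched while soft-disabled */
};

/* Model of going straight from fully-enabled to hard-disabled. */
static void toy_hard_irq_disable(struct toy_paca *p)
{
	/* the real primitive also clears MSR[EE] at this point */
	p->soft_enabled = 0;
	p->irq_happened |= PACA_IRQ_HARD_DIS;
}

int main(void)
{
	struct toy_paca paca = { .soft_enabled = 1, .irq_happened = 0 };

	toy_hard_irq_disable(&paca);

	/* The invariant the new WARN_ON checks: only the hard-disable was
	 * recorded, so kvmppc_fix_ee_before_entry() may simply reset it. */
	assert(paca.irq_happened == PACA_IRQ_HARD_DIS);
	paca.irq_happened = 0;
	paca.soft_enabled = 1;
	return 0;
}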
@@ -999,14 +999,14 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = s;
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
+
 		kvmppc_handle_lost_ext(vcpu);
 	}
@@ -1219,12 +1219,10 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	local_irq_disable();
 	ret = kvmppc_prepare_to_enter(vcpu);
-	if (ret <= 0) {
-		local_irq_enable();
+	if (ret <= 0)
 		goto out;
-	}
+	/* interrupts now hard-disabled */
 
 	/* Save FPU state in thread_struct */
 	if (current->thread.regs->msr & MSR_FP)
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		local_irq_disable();
+		hard_irq_disable();
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 		r = 1;
@@ -688,13 +688,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	local_irq_disable();
 	s = kvmppc_prepare_to_enter(vcpu);
 	if (s <= 0) {
-		local_irq_enable();
 		ret = s;
 		goto out;
 	}
+	/* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
@@ -1187,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * aren't already exiting to userspace for some other reason.
 	 */
 	if (!(r & RESUME_HOST)) {
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 	}
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-	int r = 1;
+	int r;
+
+	WARN_ON(irqs_disabled());
+	hard_irq_disable();
 
-	WARN_ON_ONCE(!irqs_disabled());
 	while (true) {
 		if (need_resched()) {
 			local_irq_enable();
 			cond_resched();
-			local_irq_disable();
+			hard_irq_disable();
 			continue;
 		}
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			local_irq_enable();
 			trace_kvm_check_requests(vcpu);
 			r = kvmppc_core_check_requests(vcpu);
-			local_irq_disable();
+			hard_irq_disable();
 			if (r > 0)
 				continue;
 			break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
-#ifdef CONFIG_PPC64
-		/* lazy EE magic */
-		hard_irq_disable();
-		if (lazy_irq_pending()) {
-			/* Got an interrupt in between, try again */
-			local_irq_enable();
-			local_irq_disable();
-			kvm_guest_exit();
-			continue;
-		}
-#endif
-
 		kvm_guest_enter();
-		break;
+		return 1;
 	}
 
+	/* return to host */
+	local_irq_enable();
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);