提交 fb58e300 编写于 作者: D David Vrabel 提交者: Konrad Rzeszutek Wilk

x86/xen: disable preemption when enabling local irqs

If CONFIG_PREEMPT is enabled then xen_enable_irq() (and
xen_restore_fl()) could be preempted and rescheduled on a different
VCPU in between the clear of the mask and the check for pending
events.  This may result in events being lost as the upcall will check
for pending events on the wrong VCPU.

Fix this by disabling preemption around the unmask and check for
events.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
上级 781575cd
...@@ -47,23 +47,18 @@ static void xen_restore_fl(unsigned long flags) ...@@ -47,23 +47,18 @@ static void xen_restore_fl(unsigned long flags)
/* convert from IF type flag */ /* convert from IF type flag */
flags = !(flags & X86_EFLAGS_IF); flags = !(flags & X86_EFLAGS_IF);
/* There's a one instruction preempt window here. We need to /* See xen_irq_enable() for why preemption must be disabled. */
make sure we're don't switch CPUs between getting the vcpu
pointer and updating the mask. */
preempt_disable(); preempt_disable();
vcpu = this_cpu_read(xen_vcpu); vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = flags; vcpu->evtchn_upcall_mask = flags;
preempt_enable_no_resched();
/* Doesn't matter if we get preempted here, because any
pending event will get dealt with anyway. */
if (flags == 0) { if (flags == 0) {
preempt_check_resched();
barrier(); /* unmask then check (avoid races) */ barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending)) if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback(); xen_force_evtchn_callback();
} preempt_enable();
} else
preempt_enable_no_resched();
} }
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
...@@ -82,10 +77,12 @@ static void xen_irq_enable(void) ...@@ -82,10 +77,12 @@ static void xen_irq_enable(void)
{ {
struct vcpu_info *vcpu; struct vcpu_info *vcpu;
/* We don't need to worry about being preempted here, since /*
either a) interrupts are disabled, so no preemption, or b) * We may be preempted as soon as vcpu->evtchn_upcall_mask is
the caller is confused and is trying to re-enable interrupts * cleared, so disable preemption to ensure we check for
on an indeterminate processor. */ * events on the VCPU we are still running on.
*/
preempt_disable();
vcpu = this_cpu_read(xen_vcpu); vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = 0; vcpu->evtchn_upcall_mask = 0;
...@@ -96,6 +93,8 @@ static void xen_irq_enable(void) ...@@ -96,6 +93,8 @@ static void xen_irq_enable(void)
barrier(); /* unmask then check (avoid races) */ barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending)) if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback(); xen_force_evtchn_callback();
preempt_enable();
} }
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable); PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册