Commit 39e6c820 authored by Eric Dumazet, committed by David S. Miller

net: solve a NAPI race

While playing with mlx4 hardware timestamping of RX packets, I found
that some packets were received by TCP stack with a ~200 ms delay...

Since the timestamp was provided by the NIC, and my probe was added
in tcp_v4_rcv() while in BH handler, I was confident it was not
a sender issue, or a drop in the network.

This would happen with very low probability, but enough to hurt RPC
workloads.

A NAPI driver normally re-arms the device IRQ after napi_complete_done()
has cleared NAPI_STATE_SCHED, so that the hard irq handler can grab it
again.
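
For reference, a minimal sketch of that conventional pattern, for a hypothetical driver "foo" (the foo_* names are illustrative, not part of this patch):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {
        struct napi_struct napi;
        /* device specific fields omitted */
};

static irqreturn_t foo_intr(int irq, void *data)
{
        struct foo_priv *priv = data;

        /* Device interrupts are typically masked here (or auto-masked);
         * napi_schedule() is a no-op if NAPI_STATE_SCHED is already owned.
         */
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        int work;

        /* foo_process_rx() would drain up to @budget packets from the RX ring */
        work = foo_process_rx(priv, budget);

        if (work < budget && napi_complete_done(napi, work))
                foo_enable_irq(priv);   /* re-arm the device IRQ only when truly done */

        return work;
}

The return value of napi_complete_done() tells the driver whether it may re-arm its IRQ: when it returns false, the NAPI instance is still owned elsewhere and foo_poll() will be invoked again.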

The problem is that if another part of the stack grabs the NAPI_STATE_SCHED
bit while IRQs are not disabled, an IRQ may later fire and find this bit
still set, right before napi_complete_done() clears it.

This can happen with busy polling users, or if gro_flush_timeout is
used. But some other uses of napi_schedule() in drivers can cause this
as well.

thread 1                                 thread 2 (could be on same cpu, or not)

// busy polling or napi_watchdog()
napi_schedule();
...
napi->poll()

device polling:
read 2 packets from ring buffer
                                          Additional 3rd packet is
                                          available.

                                          device hard irq

                                          // does nothing because
                                          NAPI_STATE_SCHED bit is owned by thread 1
                                          napi_schedule();

napi_complete_done(napi, 2);
rearm_irq();

Note that rearm_irq() will not force the device to send an additional
IRQ for the packet it already signaled (3rd packet in my example).

This patch adds a new NAPI_STATE_MISSED bit, which napi_schedule_prep()
sets if it could not grab NAPI_STATE_SCHED.

Then napi_complete_done() properly reschedules the napi to make sure
we do not miss something.

Since we manipulate multiple bits at once, use cmpxchg() like in
sk_busy_loop() to provide proper transactions.
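
As an illustration of that pattern (the helper name below is made up; the real implementation is napi_schedule_prep() in the diff), the transaction reads the whole state word, computes the new value, and retries if another cpu changed the state in between:

#include <linux/netdevice.h>

/* Same retry loop as the patch, but written with an explicit if()
 * instead of the branchless divide/multiply trick.
 */
static bool napi_try_schedule(struct napi_struct *n)
{
        unsigned long val, new;

        do {
                val = READ_ONCE(n->state);
                if (unlikely(val & NAPIF_STATE_DISABLE))
                        return false;                   /* napi_disable() is pending */

                new = val | NAPIF_STATE_SCHED;
                if (val & NAPIF_STATE_SCHED)            /* poll already owned elsewhere */
                        new |= NAPIF_STATE_MISSED;      /* remember we arrived too late */
        } while (cmpxchg(&n->state, val, new) != val);  /* retry if state changed under us */

        return !(val & NAPIF_STATE_SCHED);              /* true : caller now owns the poll */
}

napi_complete_done() runs the same kind of loop in the other direction: it clears both bits at once, but keeps NAPI_STATE_SCHED and reschedules the napi when it finds NAPI_STATE_MISSED set.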

In v2, I changed napi_watchdog() to use a relaxed variant of
napi_schedule_prep(): there is no need to set NAPI_STATE_MISSED from this point.

In v3, I added more details to the changelog and cleared
NAPI_STATE_MISSED in busy_poll_stop().

In v4, I incorporated the ideas given by Alexander Duyck in his v3 review.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: b2d0fe35
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -330,6 +330,7 @@ struct napi_struct {
 enum {
         NAPI_STATE_SCHED,       /* Poll is scheduled */
+        NAPI_STATE_MISSED,      /* reschedule a napi */
         NAPI_STATE_DISABLE,     /* Disable pending */
         NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
         NAPI_STATE_HASHED,      /* In NAPI hash (busy polling possible) */
@@ -338,12 +339,13 @@
 };
 
 enum {
-        NAPIF_STATE_SCHED        = (1UL << NAPI_STATE_SCHED),
-        NAPIF_STATE_DISABLE      = (1UL << NAPI_STATE_DISABLE),
-        NAPIF_STATE_NPSVC        = (1UL << NAPI_STATE_NPSVC),
-        NAPIF_STATE_HASHED       = (1UL << NAPI_STATE_HASHED),
-        NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
-        NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
+        NAPIF_STATE_SCHED        = BIT(NAPI_STATE_SCHED),
+        NAPIF_STATE_MISSED       = BIT(NAPI_STATE_MISSED),
+        NAPIF_STATE_DISABLE      = BIT(NAPI_STATE_DISABLE),
+        NAPIF_STATE_NPSVC        = BIT(NAPI_STATE_NPSVC),
+        NAPIF_STATE_HASHED       = BIT(NAPI_STATE_HASHED),
+        NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+        NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
 };
 
 enum gro_result {
@@ -414,20 +416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n)
         return test_bit(NAPI_STATE_DISABLE, &n->state);
 }
 
-/**
- *      napi_schedule_prep - check if NAPI can be scheduled
- *      @n: NAPI context
- *
- * Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable to
- * insure only one NAPI poll instance runs. We also make
- * sure there is no pending NAPI disable.
- */
-static inline bool napi_schedule_prep(struct napi_struct *n)
-{
-        return !napi_disable_pending(n) &&
-                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
-}
+bool napi_schedule_prep(struct napi_struct *n);
 
 /**
  *      napi_schedule - schedule NAPI poll
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4883,6 +4883,39 @@ void __napi_schedule(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+/**
+ *      napi_schedule_prep - check if napi can be scheduled
+ *      @n: napi context
+ *
+ * Test if NAPI routine is already running, and if not mark
+ * it as running. This is used as a condition variable
+ * insure only one NAPI poll instance runs. We also make
+ * sure there is no pending NAPI disable.
+ */
+bool napi_schedule_prep(struct napi_struct *n)
+{
+        unsigned long val, new;
+
+        do {
+                val = READ_ONCE(n->state);
+                if (unlikely(val & NAPIF_STATE_DISABLE))
+                        return false;
+                new = val | NAPIF_STATE_SCHED;
+
+                /* Sets STATE_MISSED bit if STATE_SCHED was already set
+                 * This was suggested by Alexander Duyck, as compiler
+                 * emits better code than :
+                 *      if (val & NAPIF_STATE_SCHED)
+                 *              new |= NAPIF_STATE_MISSED;
+                 */
+                new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
+                                                   NAPIF_STATE_MISSED;
+        } while (cmpxchg(&n->state, val, new) != val);
+
+        return !(val & NAPIF_STATE_SCHED);
+}
+EXPORT_SYMBOL(napi_schedule_prep);
+
 /**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
@@ -4897,7 +4930,7 @@ EXPORT_SYMBOL(__napi_schedule_irqoff);
 
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
-        unsigned long flags;
+        unsigned long flags, val, new;
 
         /*
          * 1) Don't let napi dequeue from the cpu poll list
@@ -4927,7 +4960,27 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                 list_del_init(&n->poll_list);
                 local_irq_restore(flags);
         }
-        WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
+
+        do {
+                val = READ_ONCE(n->state);
+
+                WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
+
+                new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
+
+                /* If STATE_MISSED was set, leave STATE_SCHED set,
+                 * because we will call napi->poll() one more time.
+                 * This C code was suggested by Alexander Duyck to help gcc.
+                 */
+                new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
+                                                    NAPIF_STATE_SCHED;
+        } while (cmpxchg(&n->state, val, new) != val);
+
+        if (unlikely(val & NAPIF_STATE_MISSED)) {
+                __napi_schedule(n);
+                return false;
+        }
+
         return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
@@ -4953,6 +5006,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 {
         int rc;
 
+        /* Busy polling means there is a high chance device driver hard irq
+         * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
+         * set in napi_schedule_prep().
+         * Since we are about to call napi->poll() once more, we can safely
+         * clear NAPI_STATE_MISSED.
+         *
+         * Note: x86 could use a single "lock and ..." instruction
+         * to perform these two clear_bit()
+         */
+        clear_bit(NAPI_STATE_MISSED, &napi->state);
         clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
 
         local_bh_disable();
@@ -5088,8 +5151,13 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
         struct napi_struct *napi;
 
         napi = container_of(timer, struct napi_struct, timer);
-        if (napi->gro_list)
-                napi_schedule_irqoff(napi);
+
+        /* Note : we use a relaxed variant of napi_schedule_prep() not setting
+         * NAPI_STATE_MISSED, since we do not react to a device IRQ.
+         */
+        if (napi->gro_list && !napi_disable_pending(napi) &&
+            !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
+                __napi_schedule_irqoff(napi);
 
         return HRTIMER_NORESTART;
 }