Commit eecfd7c4 authored by Eric Dumazet, committed by David S. Miller

rps: Various optimizations

Introduce ____napi_schedule() helper for callers in irq disabled
contexts. rps_trigger_softirq() becomes a leaf function.

Use container_of() in process_backlog() instead of accessing per_cpu
address.
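
A minimal userspace sketch of what that buys, using simplified stand-in struct layouts (these are not the kernel definitions): container_of() recovers the containing softnet_data from a pointer to its embedded backlog napi_struct by subtracting the member offset, so process_backlog() no longer needs a per-cpu address lookup.

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel structures (illustration only). */
	struct napi_struct { unsigned long state; };
	struct softnet_data {
		unsigned int		received_rps;
		struct napi_struct	backlog;	/* napi embedded in softnet_data */
	};

	/* Same idea as the kernel's container_of(): subtract the member offset. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct softnet_data sd = { .received_rps = 7 };
		struct napi_struct *napi = &sd.backlog;

		/* process_backlog() only receives the napi pointer; recover its owner. */
		struct softnet_data *owner = container_of(napi, struct softnet_data, backlog);
		printf("%u\n", owner->received_rps);	/* prints 7 */
		return 0;
	}

The kernel macro performs the same subtraction, with additional type checking.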

Use a custom inlined version of __napi_complete() in process_backlog()
to avoid one locked instruction:

 only the current cpu owns and manipulates this napi,
 and NAPI_STATE_SCHED is the only possible flag set on the backlog,
 so we can use a plain write instead of clear_bit(),
 and we don't need an smp_mb() memory barrier, since with RPS on
 the backlog is protected by a spinlock.
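
As a rough userspace analogue of that argument (not kernel code; the demo_ names and the pthread/C11 primitives are stand-ins chosen for illustration): when a flag word is only ever touched by the CPU that holds the queue's spinlock and only one bit can ever be set, the locked read-modify-write that clear_bit() would issue can be replaced by a plain store of 0, with the lock release providing the ordering.

	#include <pthread.h>
	#include <stdatomic.h>

	#define DEMO_STATE_SCHED 0	/* stand-in for NAPI_STATE_SCHED */

	struct demo_backlog {
		pthread_spinlock_t lock;	/* stand-in for the RPS input queue lock */
		atomic_ulong state;		/* only the SCHED bit is ever set */
	};

	/* Generic completion: a locked RMW, like clear_bit() on napi->state. */
	static void demo_complete_generic(struct demo_backlog *b)
	{
		atomic_fetch_and(&b->state, ~(1UL << DEMO_STATE_SCHED));
	}

	/* Backlog-only completion: the owning CPU clears the sole bit with a plain
	 * (relaxed) store while holding the queue lock; the unlock orders it. */
	static void demo_complete_backlog(struct demo_backlog *b)
	{
		pthread_spin_lock(&b->lock);
		atomic_store_explicit(&b->state, 0, memory_order_relaxed);
		pthread_spin_unlock(&b->lock);
	}

	int main(void)
	{
		struct demo_backlog b;

		pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);
		atomic_init(&b.state, 1UL << DEMO_STATE_SCHED);
		demo_complete_generic(&b);	/* locked RMW variant */

		atomic_store(&b.state, 1UL << DEMO_STATE_SCHED);
		demo_complete_backlog(&b);	/* plain-store-under-lock variant */
		return 0;
	}

The kernel-side condition that makes this safe is exactly what the list above states: with RPS enabled the backlog queue sits under the rps_lock()/rps_unlock() pair visible in the diff below, which is why no smp_mb() is needed.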
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 39e0786d
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2205,6 +2205,14 @@ int netdev_max_backlog __read_mostly = 1000;
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;            /* old backlog weight */
 
+/* Called with irq disabled */
+static inline void ____napi_schedule(struct softnet_data *sd,
+				     struct napi_struct *napi)
+{
+	list_add_tail(&napi->poll_list, &sd->poll_list);
+	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+}
+
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
@@ -2363,7 +2371,7 @@ static void rps_trigger_softirq(void *data)
 {
 	struct softnet_data *sd = data;
 
-	__napi_schedule(&sd->backlog);
+	____napi_schedule(sd, &sd->backlog);
 	sd->received_rps++;
 }
 
@@ -2421,7 +2429,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 		/* Schedule NAPI for backlog device */
 		if (napi_schedule_prep(&sd->backlog)) {
 			if (!rps_ipi_queued(sd))
-				__napi_schedule(&sd->backlog);
+				____napi_schedule(sd, &sd->backlog);
 		}
 		goto enqueue;
 	}
@@ -3280,7 +3288,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
 
 #ifdef CONFIG_RPS
 	/* Check if we have pending ipi, its better to send them now,
@@ -3313,7 +3321,16 @@ static int process_backlog(struct napi_struct *napi, int quota)
 						   &sd->process_queue);
 		}
 		if (qlen < quota - work) {
-			__napi_complete(napi);
+			/*
+			 * Inline a custom version of __napi_complete().
+			 * only current cpu owns and manipulates this napi,
+			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
+			 * we can use a plain write instead of clear_bit(),
+			 * and we dont need an smp_mb() memory barrier.
+			 */
+			list_del(&napi->poll_list);
+			napi->state = 0;
+
 			quota = work + qlen;
 		}
 		rps_unlock(sd);
@@ -3334,8 +3351,7 @@ void __napi_schedule(struct napi_struct *n)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	____napi_schedule(&__get_cpu_var(softnet_data), n);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);