Commit 88751275 authored by Eric Dumazet, committed by David S. Miller

rps: shortcut net_rps_action()

net_rps_action() is a bit expensive on NR_CPUS=64..4096 kernels, even if
RPS is not active.

Tom Herbert used two bitmasks to hold the information needed to send
IPIs, but a single LIFO list seems more appropriate.
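
For illustration, a minimal user-space sketch of that intrusive LIFO
chaining. The field names mirror the patch, but the surrounding
scaffolding (rps_ipi_push(), main(), the printf) is invented for this
example:

    #include <stdio.h>

    /* Stripped-down stand-in for struct softnet_data: each per-cpu
     * queue embeds its own "next" pointer, so a remote queue can be
     * chained onto the local cpu's rps_ipi_list with no allocation
     * and no NR_CPUS-sized bitmask. */
    struct softnet_data {
            unsigned int cpu;
            struct softnet_data *rps_ipi_next;
            struct softnet_data *rps_ipi_list; /* head, local cpu only */
    };

    /* LIFO push: the O(1) operation enqueue_to_backlog() now performs
     * when it targets a remote cpu's backlog. */
    static void rps_ipi_push(struct softnet_data *local,
                             struct softnet_data *remote)
    {
            remote->rps_ipi_next = local->rps_ipi_list;
            local->rps_ipi_list = remote;
    }

    int main(void)
    {
            struct softnet_data q[4] = {
                    {.cpu = 0}, {.cpu = 1}, {.cpu = 2}, {.cpu = 3}
            };
            struct softnet_data *sd;

            rps_ipi_push(&q[0], &q[2]); /* cpu 0 owes cpu 2 an IPI */
            rps_ipi_push(&q[0], &q[3]); /* ... and cpu 3 */

            /* Draining walks only the queued entries, never all cpus. */
            for (sd = q[0].rps_ipi_list; sd; sd = sd->rps_ipi_next)
                    printf("would send IPI to cpu %u\n", sd->cpu);
            return 0;
    }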

Move all RPS logic into net_rps_action() to clean up net_rx_action()
code (removing two ifdefs).
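
The shape of that cleanup, as a compilable schematic (the stub
functions below merely stand in for kernel primitives; the real
helper, shown in the diff, also re-enables irqs before sending):

    #include <stdio.h>

    #define CONFIG_RPS 1 /* comment out to mimic an RPS-less build */

    static void local_irq_enable(void) { puts("irqs enabled"); }
    #ifdef CONFIG_RPS
    static void send_pending_rps_ipis(void) { puts("IPIs sent"); }
    #endif

    /* The single definition owns the #ifdef, so the call site in
     * net_rx_action() is unconditional; with RPS compiled out the
     * helper degenerates to plain local_irq_enable(). */
    static void net_rps_action(void)
    {
    #ifdef CONFIG_RPS
            send_pending_rps_ipis();
    #endif
            local_irq_enable();
    }

    int main(void)
    {
            net_rps_action(); /* no #ifdef needed at the call site */
            return 0;
    }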

Move rps_remote_softirq_cpus into softnet_data to share its first cache
line, filling an existing hole.
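
A toy model of that layout argument, with simplified stand-in types
(sizes assume a 64-bit build; the real struct is in the netdevice.h
hunk below):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHELINE 64
    #define __cacheline_aligned __attribute__((aligned(CACHELINE)))

    /* The members ahead of the cacheline-aligned csd leave an
     * alignment hole, so a new 8-byte pointer dropped there consumes
     * padding instead of growing the struct. */
    struct softnet_data {
            void *output_queue;
            void *poll_list[2];  /* stand-in for struct list_head */
            void *completion_queue;
            void *rps_ipi_list;  /* new member, lands in the hole */
            char csd[48] __cacheline_aligned; /* stand-in for call_single_data */
    };

    int main(void)
    {
            /* csd still starts at offset 64, with or without
             * rps_ipi_list; only the padding shrank. */
            printf("offsetof(csd) = %zu, sizeof = %zu\n",
                   offsetof(struct softnet_data, csd),
                   sizeof(struct softnet_data));
            return 0;
    }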

In a future patch, we could call net_rps_action() from process_backlog()
to make sure we send IPIs before handling this cpu's backlog.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent a03b1a5c
@@ -1381,17 +1381,20 @@ static inline int unregister_gifconf(unsigned int family)
 }
 
 /*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
  */
 struct softnet_data {
 	struct Qdisc		*output_queue;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
 
-	/* Elements below can be accessed between CPUs for RPS */
 #ifdef CONFIG_RPS
+	struct softnet_data	*rps_ipi_list;
+
+	/* Elements below can be accessed between CPUs for RPS */
 	struct call_single_data	csd ____cacheline_aligned_in_smp;
+	struct softnet_data	*rps_ipi_next;
+	unsigned int		cpu;
 	unsigned int		input_queue_head;
 #endif
 	struct sk_buff_head	input_pkt_queue;
...
@@ -2345,21 +2345,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	return cpu;
 }
 
-/*
- * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
- * to be sent to kick remote softirq processing. There are two masks since
- * the sending of IPIs must be done with interrupts enabled. The select field
- * indicates the current mask that enqueue_backlog uses to schedule IPIs.
- * select is flipped before net_rps_action is called while still under lock,
- * net_rps_action then uses the non-selected mask to send the IPIs and clears
- * it without conflicting with enqueue_backlog operation.
- */
-struct rps_remote_softirq_cpus {
-	cpumask_t mask[2];
-	int select;
-};
-static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
-
 /* Called from hardirq (IPI) context */
 static void trigger_softirq(void *data)
 {
@@ -2402,10 +2387,12 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	if (napi_schedule_prep(&queue->backlog)) {
 #ifdef CONFIG_RPS
 		if (cpu != smp_processor_id()) {
-			struct rps_remote_softirq_cpus *rcpus =
-			    &__get_cpu_var(rps_remote_softirq_cpus);
+			struct softnet_data *myqueue;
+
+			myqueue = &__get_cpu_var(softnet_data);
+			queue->rps_ipi_next = myqueue->rps_ipi_list;
+			myqueue->rps_ipi_list = queue;
 
-			cpu_set(cpu, rcpus->mask[rcpus->select]);
 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 			goto enqueue;
 		}
@@ -2910,7 +2897,9 @@ int netif_receive_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
-/* Network device is going away, flush any packets still pending */
+/* Network device is going away, flush any packets still pending
+ * Called with irqs disabled.
+ */
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
@@ -3338,24 +3327,33 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-#ifdef CONFIG_RPS
 /*
- * net_rps_action sends any pending IPI's for rps. This is only called from
- * softirq and interrupts must be enabled.
+ * net_rps_action sends any pending IPI's for rps.
+ * Note: called with local irq disabled, but exits with local irq enabled.
  */
-static void net_rps_action(cpumask_t *mask)
+static void net_rps_action(void)
 {
-	int cpu;
+#ifdef CONFIG_RPS
+	struct softnet_data *locqueue = &__get_cpu_var(softnet_data);
+	struct softnet_data *remqueue = locqueue->rps_ipi_list;
 
-	/* Send pending IPI's to kick RPS processing on remote cpus. */
-	for_each_cpu_mask_nr(cpu, *mask) {
-		struct softnet_data *queue = &per_cpu(softnet_data, cpu);
-		if (cpu_online(cpu))
-			__smp_call_function_single(cpu, &queue->csd, 0);
-	}
-	cpus_clear(*mask);
-}
+	if (remqueue) {
+		locqueue->rps_ipi_list = NULL;
+
+		local_irq_enable();
+
+		/* Send pending IPI's to kick RPS processing on remote cpus. */
+		while (remqueue) {
+			struct softnet_data *next = remqueue->rps_ipi_next;
+			if (cpu_online(remqueue->cpu))
+				__smp_call_function_single(remqueue->cpu,
+							   &remqueue->csd, 0);
+			remqueue = next;
+		}
+	} else
 #endif
+		local_irq_enable();
+}
 
 static void net_rx_action(struct softirq_action *h)
 {
@@ -3363,10 +3361,6 @@ static void net_rx_action(struct softirq_action *h)
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
-#ifdef CONFIG_RPS
-	int select;
-	struct rps_remote_softirq_cpus *rcpus;
-#endif
 
 	local_irq_disable();
@@ -3429,17 +3423,7 @@ static void net_rx_action(struct softirq_action *h)
 		netpoll_poll_unlock(have);
 	}
 out:
-#ifdef CONFIG_RPS
-	rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
-	select = rcpus->select;
-	rcpus->select ^= 1;
-
-	local_irq_enable();
-	net_rps_action(&rcpus->mask[select]);
-#else
-	local_irq_enable();
-#endif
+	net_rps_action();
 
 #ifdef CONFIG_NET_DMA
 	/*
@@ -5839,6 +5823,7 @@ static int __init net_dev_init(void)
 		queue->csd.func = trigger_softirq;
 		queue->csd.info = queue;
 		queue->csd.flags = 0;
+		queue->cpu = i;
 #endif
 
 		queue->backlog.poll = process_backlog;
...