Commit 24b36f01 authored by Eric Dumazet, committed by Patrick McHardy

netfilter: {ip,ip6,arp}_tables: don't block bottom half more than necessary

We currently disable BH for the whole duration of get_counters().

On machines with many CPUs and large tables, this can take too long.

We can instead disable preemption for the whole function, and disable BH only
while fetching the counters for the current CPU.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Parent 7df0884c
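For reference, this is the resulting shape of get_counters() after the patch, as a minimal sketch: the per-entry walks are elided (the real code iterates with xt_entry_foreach() over t->entries[cpu], as shown in the hunks below), and only the preemption/BH/lock structure is kept.

static void get_counters(const struct xt_table_info *t,
                         struct xt_counters counters[])
{
        unsigned int cpu;
        unsigned int curcpu = get_cpu();        /* disables preemption */

        /* BH is off only while reading this cpu's counters, so a
         * softirq cannot re-enter the table on this cpu meanwhile.
         */
        local_bh_disable();
        /* ... set counters[] from t->entries[curcpu] ... */
        local_bh_enable();

        /* Other cpus' entries are protected by their per-cpu write
         * lock, so bottom halves can stay enabled while summing them.
         */
        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                xt_info_wrlock(cpu);
                /* ... add t->entries[cpu] into counters[] ... */
                xt_info_wrunlock(cpu);
        }
        put_cpu();                              /* re-enables preemption */
}

Keeping preemption disabled across the whole function ensures the task cannot migrate onto another cpu and end up reading that cpu's counters without BH protection.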
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -720,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -741,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,7 +897,7 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -907,14 +907,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -928,7 +930,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)