Commit acb32ba3 authored by Eric Dumazet, committed by David S. Miller

ipv4: reduce percpu needs for icmpmsg mibs

Reading /proc/net/snmp on a machine with a lot of cpus is very expensive
(can be ~88000 us).

This is because ICMPMSG MIB uses 4096 bytes per cpu, and folding values
for all possible cpus can read 16 Mbytes of memory.
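As a back-of-the-envelope check of those figures (a plain userspace sketch, assuming 64-bit longs and a kernel built for 4096 possible cpus; ICMPMSG_MIB_MAX is 512, covering 256 incoming and 256 outgoing message types):

/* Rough arithmetic behind the numbers above; userspace C, not kernel code.
 * Assumes sizeof(unsigned long) == 8 and 4096 possible cpus.
 */
#include <stdio.h>

#define ICMPMSG_MIB_MAX	512	/* 256 in-counters + 256 out-counters */
#define POSSIBLE_CPUS	4096	/* assumption: a large NR_CPUS build */

int main(void)
{
	unsigned long per_cpu = ICMPMSG_MIB_MAX * sizeof(unsigned long);
	unsigned long folded  = per_cpu * POSSIBLE_CPUS;

	printf("icmpmsg mib per cpu: %lu bytes\n", per_cpu);   /* 4096 */
	printf("walked per fold:     %lu MB\n", folded >> 20); /* 16 */
	return 0;
}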

ICMP messages are not considered a fast path on a typical server, and only
a few cpus end up handling them anyway. We can afford an atomic
operation instead of percpu data.
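A minimal sketch of that trade-off, with C11 atomics standing in for the kernel's percpu and atomic_long_t primitives (names and sizes here are illustrative only):

/* Illustrative userspace model of the two counting schemes. */
#include <stdatomic.h>

#define NCPUS	4096	/* assumption: possible cpus */
#define NFIELDS	512	/* ICMPMSG_MIB_MAX */

/* Before: one slot per cpu; a writer's increment is a cheap local add...
 * (this static array also mirrors the ~16 MB footprint being removed). */
static unsigned long percpu_mibs[NCPUS][NFIELDS];

/* ...but every /proc/net/snmp read folds all possible cpus. */
static unsigned long fold_field(int field)
{
	unsigned long sum = 0;

	for (int cpu = 0; cpu < NCPUS; cpu++)
		sum += percpu_mibs[cpu][field];
	return sum;
}

/* After: one shared array; writers pay an atomic RMW, readers a single load. */
static atomic_long shared_mibs[NFIELDS];

static void count_msg(int field)
{
	atomic_fetch_add(&shared_mibs[field], 1);	/* ~ atomic_long_inc() */
}

static long read_field(int field)
{
	return atomic_load(&shared_mibs[field]);	/* ~ atomic_long_read() */
}

With only a handful of cpus ever taking ICMP traffic, contention on the shared atomics stays negligible, which is the bet stated above.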

This saves 4096 bytes per cpu and per network namespace.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e56c57d0
@@ -31,8 +31,8 @@ struct icmp_err {
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 struct dst_entry;
 struct net_proto_family;
@@ -10,7 +10,7 @@ struct netns_mib {
 DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
 DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
 DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
-DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 struct proc_dir_entry *proc_net_devsnmp6;
@@ -67,7 +67,7 @@ struct icmp_mib {
 #define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
 struct icmpmsg_mib {
-	unsigned long mibs[ICMPMSG_MIB_MAX];
+	atomic_long_t mibs[ICMPMSG_MIB_MAX];
 };
 /* ICMP6 (IPv6-ICMP) */
@@ -1572,9 +1572,9 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 			  sizeof(struct icmp_mib),
 			  __alignof__(struct icmp_mib)) < 0)
 		goto err_icmp_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
-			  sizeof(struct icmpmsg_mib),
-			  __alignof__(struct icmpmsg_mib)) < 0)
+	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+					      GFP_KERNEL);
+	if (!net->mib.icmpmsg_statistics)
 		goto err_icmpmsg_mib;
 	tcp_mib_init(net);
@@ -1598,7 +1598,7 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
-	snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+	kfree(net->mib.icmpmsg_statistics);
 	snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
 	snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
 	snmp_mib_free((void __percpu **)net->mib.udp_statistics);
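The allocation side of the two hunks above reduces to a plain kzalloc/kfree pair; a condensed sketch with hypothetical function names (the real code is in ipv4_mib_init_net()/ipv4_mib_exit_net() as shown). kzalloc matters here because it zeroes the 4 KB object, so every atomic_long_t counter starts at 0 without further initialisation:

/* Condensed sketch of the per-netns lifetime of the icmpmsg counters
 * after this patch (hypothetical helper names, kernel context assumed).
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/snmp.h>
#include <net/net_namespace.h>

static int icmpmsg_mib_alloc(struct net *net)
{
	/* One zeroed 4 KB object per namespace instead of 4 KB per cpu. */
	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
					      GFP_KERNEL);
	return net->mib.icmpmsg_statistics ? 0 : -ENOMEM;
}

static void icmpmsg_mib_free(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);	/* no percpu free or fold needed */
}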
@@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq)
 	count = 0;
 	for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-		val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+		val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
 		if (val) {
 			type[count] = i;
 			vals[count++] = val;
@@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq)
 {
 	int i;
 	struct net *net = seq->private;
+	atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
 	seq_puts(seq, "\nIcmp: InMsgs InErrors");
 	for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq)
 		   snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
 	for (i=0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-					   icmpmibmap[i].index));
+			   atomic_long_read(ptr + icmpmibmap[i].index));
 	seq_printf(seq, " %lu %lu",
 		   snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
 		   snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
 	for (i=0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-					   icmpmibmap[i].index | 0x100));
+			   atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
 }
 /*
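The field+256 in icmp.h and the | 0x100 in proc.c refer to the same convention: the single 512-slot icmpmsg array keeps incoming message types at their ICMP type value (0..255) and outgoing types 256 slots higher. A small illustrative sketch (hypothetical helper names, C11 atomics standing in for atomic_long_t):

/* Illustration of the in/out split inside the 512-entry icmpmsg array. */
#include <stdatomic.h>

#define ICMPMSG_MIB_MAX	512	/* 256 in-slots followed by 256 out-slots */

static atomic_long icmpmsg_mibs[ICMPMSG_MIB_MAX];

/* Received messages of a given ICMP type are counted in slots 0..255... */
static long icmpmsg_in(unsigned int type)
{
	return atomic_load(&icmpmsg_mibs[type]);
}

/* ...and transmitted messages in slots 256..511; type | 0x100 == type + 256
 * for type < 256, matching the field+256 used by ICMPMSGOUT_INC_STATS. */
static long icmpmsg_out(unsigned int type)
{
	return atomic_load(&icmpmsg_mibs[type | 0x100]);
}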