net: proc: speedup /proc/net/netstat

Use cache-friendly helpers to make better use of CPU caches
while reading /proc/net/netstat

Tested on a platform with 256 threads (AMD Rome)

Before: 305 usec spent in netstat_seq_show()
After: 130 usec spent in netstat_seq_show()
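
For illustration only, here is a minimal user-space sketch of the access-pattern change behind the batch helpers; it is not the kernel code itself, and every name in it (NR_CPUS_SIM, NR_FIELDS, per_cpu_mib, fold_per_field(), fold_batched()) is invented for this example. The old scheme folds one counter at a time, revisiting every CPU's statistics block once per counter; the batched scheme walks each CPU's block once and accumulates all counters from it while that block is cache-hot.

/* User-space sketch only: illustrates the cache-friendly fold order,
 * not the actual kernel helpers. All names here are invented.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS_SIM 256		/* e.g. the 256-thread AMD Rome box */
#define NR_FIELDS   128		/* number of MIB counters */

/* one block of counters per CPU, like the kernel's per-cpu SNMP mibs */
static unsigned long per_cpu_mib[NR_CPUS_SIM][NR_FIELDS];

/* Old pattern: fold one field at a time. The whole dump touches
 * NR_FIELDS * NR_CPUS_SIM scattered locations, revisiting each CPU
 * block once per counter.
 */
static void fold_per_field(unsigned long *out)
{
	for (int field = 0; field < NR_FIELDS; field++) {
		unsigned long sum = 0;

		for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
			sum += per_cpu_mib[cpu][field];
		out[field] = sum;
	}
}

/* New pattern (the loop order snmp_get_cpu_field_batch() uses): walk
 * each CPU block once and accumulate every counter from it while that
 * block is still hot in the cache.
 */
static void fold_batched(unsigned long *out)
{
	memset(out, 0, NR_FIELDS * sizeof(*out));
	for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
		for (int field = 0; field < NR_FIELDS; field++)
			out[field] += per_cpu_mib[cpu][field];
}

int main(void)
{
	unsigned long a[NR_FIELDS], b[NR_FIELDS];

	per_cpu_mib[3][7] = 42;		/* pretend CPU 3 bumped counter 7 */
	fold_per_field(a);
	fold_batched(b);
	printf("field 7: per-field=%lu batched=%lu\n", a[7], b[7]);
	return 0;
}

In the patch below, a single scratch buffer sized max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)) is reused for both the TcpExt and IpExt passes, with a fallback to per-field snmp_fold_field()/snmp_fold_field64() if the allocation fails.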

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20210128162145.1703601-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0d6cd689f9 (parent df610cd916)
Author:     Eric Dumazet
AuthorDate: 2021-01-28 08:21:45 -08:00
Commit:     Jakub Kicinski
1 changed file with 35 additions and 13 deletions

@@ -464,30 +464,52 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
  */
 static int netstat_seq_show(struct seq_file *seq, void *v)
 {
-	int i;
+	const int ip_cnt = ARRAY_SIZE(snmp4_ipextstats_list) - 1;
+	const int tcp_cnt = ARRAY_SIZE(snmp4_net_list) - 1;
 	struct net *net = seq->private;
+	unsigned long *buff;
+	int i;
 
 	seq_puts(seq, "TcpExt:");
-	for (i = 0; snmp4_net_list[i].name; i++)
+	for (i = 0; i < tcp_cnt; i++)
 		seq_printf(seq, " %s", snmp4_net_list[i].name);
 
 	seq_puts(seq, "\nTcpExt:");
-	for (i = 0; snmp4_net_list[i].name; i++)
-		seq_printf(seq, " %lu",
-			   snmp_fold_field(net->mib.net_statistics,
-					   snmp4_net_list[i].entry));
-
+	buff = kzalloc(max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)),
+		       GFP_KERNEL);
+	if (buff) {
+		snmp_get_cpu_field_batch(buff, snmp4_net_list,
+					 net->mib.net_statistics);
+		for (i = 0; i < tcp_cnt; i++)
+			seq_printf(seq, " %lu", buff[i]);
+	} else {
+		for (i = 0; i < tcp_cnt; i++)
+			seq_printf(seq, " %lu",
+				   snmp_fold_field(net->mib.net_statistics,
+						   snmp4_net_list[i].entry));
+	}
 	seq_puts(seq, "\nIpExt:");
-	for (i = 0; snmp4_ipextstats_list[i].name; i++)
+	for (i = 0; i < ip_cnt; i++)
 		seq_printf(seq, " %s", snmp4_ipextstats_list[i].name);
 
 	seq_puts(seq, "\nIpExt:");
-	for (i = 0; snmp4_ipextstats_list[i].name; i++)
-		seq_printf(seq, " %llu",
-			   snmp_fold_field64(net->mib.ip_statistics,
-					     snmp4_ipextstats_list[i].entry,
-					     offsetof(struct ipstats_mib, syncp)));
+	if (buff) {
+		u64 *buff64 = (u64 *)buff;
+		memset(buff64, 0, ip_cnt * sizeof(u64));
+		snmp_get_cpu_field64_batch(buff64, snmp4_ipextstats_list,
+					   net->mib.ip_statistics,
+					   offsetof(struct ipstats_mib, syncp));
+		for (i = 0; i < ip_cnt; i++)
+			seq_printf(seq, " %llu", buff64[i]);
+	} else {
+		for (i = 0; i < ip_cnt; i++)
+			seq_printf(seq, " %llu",
+				   snmp_fold_field64(net->mib.ip_statistics,
+						     snmp4_ipextstats_list[i].entry,
+						     offsetof(struct ipstats_mib, syncp)));
+	}
+	kfree(buff);
 
 	seq_putc(seq, '\n');
 	mptcp_seq_show(seq);
 	return 0;