net: make SK_MEMORY_PCPU_RESERV tunable
[ Upstream commit 12a686c2e761f1f1f6e6e2117a9ab9c6de2ac8a7 ]
This patch adds /proc/sys/net/core/mem_pcpu_rsv sysctl file,
to make SK_MEMORY_PCPU_RESERV tunable.
Commit 3cd3399dd7 ("net: implement per-cpu reserves for
memory_allocated") introduced per-cpu forward alloc cache:
"Implement a per-cpu cache of +1/-1 MB, to reduce number
of changes to sk->sk_prot->memory_allocated, which
would otherwise be cause of false sharing."
sk_prot->memory_allocated points to global atomic variable:
atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;
By increasing the per-cpu cache size from 1MB to e.g. 16MB,
changes to sk->sk_prot->memory_allocated can be further reduced.
Performance may be improved on systems with many cores.
Signed-off-by: Adam Li <adamli@os.amperecomputing.com>
Reviewed-by: Christoph Lameter (Ampere) <cl@linux.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: 3584718cf2ec ("net: fix sk_memory_allocated_{add|sub} vs softirqs")
Signed-off-by: Sasha Levin <sashal@kernel.org>
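
Editor's usage sketch (not part of the patch): the sysctl value is in page
units, so raising the reserve from the default 1MB to e.g. 16MB per cpu means
writing 16MB/PAGE_SIZE pages, i.e. 4096 with 4KB pages, either with
"sysctl -w net.core.mem_pcpu_rsv=4096" or programmatically as below. The
helper name set_mem_pcpu_rsv_mb() is hypothetical.

/* Hypothetical helper (editor's example, not from the patch): set the
 * per-cpu forward-alloc reserve, given in megabytes, by writing the
 * equivalent number of pages to /proc/sys/net/core/mem_pcpu_rsv. */
#include <stdio.h>
#include <unistd.h>

static int set_mem_pcpu_rsv_mb(long megabytes)
{
	long pages = (megabytes << 20) / sysconf(_SC_PAGESIZE);
	FILE *f = fopen("/proc/sys/net/core/mem_pcpu_rsv", "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", pages);
	return fclose(f);
}

int main(void)
{
	return set_mem_pcpu_rsv_mb(16) ? 1 : 0;	/* 16 MB per cpu: 4096 pages with 4K pages */
}

Writes below SK_MEMORY_PCPU_RESERVE are rejected, because the ctl_table entry
added below clamps the value through proc_dointvec_minmax with
.extra1 = &min_mem_pcpu_rsv.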
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -205,6 +205,11 @@ Will increase power usage.
 
 Default: 0 (off)
 
+mem_pcpu_rsv
+------------
+
+Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
+
 rmem_default
 ------------
 
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1458,6 +1458,7 @@ sk_memory_allocated(const struct sock *sk)
 
 /* 1 MB per cpu, in page units */
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
 
 static inline void
 sk_memory_allocated_add(struct sock *sk, int amt)
@@ -1466,7 +1467,7 @@ sk_memory_allocated_add(struct sock *sk, int amt)
 
 	preempt_disable();
 	local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+	if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
 		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
 		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
 	}
@@ -1480,7 +1481,7 @@ sk_memory_allocated_sub(struct sock *sk, int amt)
 
 	preempt_disable();
 	local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+	if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
 		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
 		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
 	}
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -283,6 +283,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 EXPORT_SYMBOL(sysctl_rmem_max);
 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
 
 /* Maximal space eaten by iovec or ancillary data plus some space */
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -30,6 +30,7 @@ static int int_3600 = 3600;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
 
@@ -407,6 +408,14 @@ static struct ctl_table net_core_table[] = {
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &min_rcvbuf,
 	},
+	{
+		.procname	= "mem_pcpu_rsv",
+		.data		= &sysctl_mem_pcpu_rsv,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &min_mem_pcpu_rsv,
+	},
 	{
 		.procname	= "dev_weight",
 		.data		= &weight_p,
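
Editor's illustrative model (plain userspace C with pthreads, not kernel code;
all names and constants below are made up for the example): the pattern being
tuned is per-cpu batching of updates to a shared counter. Each worker
accumulates changes in a private reserve and only issues an atomic
fetch-and-add on the shared counter once the reserve crosses the threshold, so
a 16x larger threshold means roughly 16x fewer updates to the contended
cacheline that stands in for tcp_memory_allocated.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4
#define ITERS    (1 << 20)

static atomic_long memory_allocated = 0;	/* models tcp_memory_allocated */
static long reserve_threshold = 256;		/* models sysctl_mem_pcpu_rsv, in "pages" */

static void *worker(void *arg)
{
	long local_reserve = 0;			/* models the per-cpu fw_alloc cache */
	int i;

	for (i = 0; i < ITERS; i++) {
		local_reserve += 1;		/* "allocate" one page */
		if (local_reserve >= reserve_threshold) {
			/* flush the whole batch to the shared counter at once */
			atomic_fetch_add(&memory_allocated, local_reserve);
			local_reserve = 0;
		}
	}
	/* flush whatever is left before the thread exits */
	atomic_fetch_add(&memory_allocated, local_reserve);
	return arg;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	/* always NTHREADS * ITERS, reached with only ~ITERS/threshold atomics per thread */
	printf("memory_allocated = %ld pages\n", atomic_load(&memory_allocated));
	return 0;
}

The kernel code additionally flushes negative reserves (sk_memory_allocated_sub)
and relies on preempt_disable() rather than threads, but the contention
trade-off controlled by the new sysctl is the same.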