author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-07 02:18:07 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-07 02:18:07 +0000
commit		6c080a1a353c6fc2590555ec7dd8982d1143d02d (patch)
tree		5fb60c5cd3d6bb6496650d1605a3c51d09be2bf1 /include/net/sock.h
parent		Adding debian version 6.1.85-1. (diff)
Merging upstream version 6.1.90.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/net/sock.h')
 include/net/sock.h | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 60577751e..77298c748 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1483,33 +1483,36 @@ sk_memory_allocated(const struct sock *sk)
 
 /* 1 MB per cpu, in page units */
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
+
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+	int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+	if (val)
+		atomic_long_add(val, proto->memory_allocated);
+}
 
 static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
 {
-	int local_reserve;
+	struct proto *proto = sk->sk_prot;
 
-	preempt_disable();
-	local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
-		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-	}
-	preempt_enable();
+	val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+	if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+		proto_memory_pcpu_drain(proto);
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
 {
-	int local_reserve;
+	struct proto *proto = sk->sk_prot;
 
-	preempt_disable();
-	local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
-		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-	}
-	preempt_enable();
+	val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+	if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+		proto_memory_pcpu_drain(proto);
 }
 
 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
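
The hunk above drops the preempt_disable()/preempt_enable() pair in favour of this_cpu_add_return()/this_cpu_sub_return(), moves the fold-back into the shared counter into a common proto_memory_pcpu_drain() helper, and compares the per-CPU reserve against the tunable sysctl_mem_pcpu_rsv (net.core.mem_pcpu_rsv, defaulting to SK_MEMORY_PCPU_RESERVE, i.e. 1 MB per CPU in page units) instead of the fixed macro. Below is a minimal userspace sketch of the same batching idea, a shared counter plus per-CPU reserves that are drained once they cross a threshold; the names NR_CPUS, pcpu_charge(), pcpu_drain() and mem_pcpu_rsv are illustrative stand-ins, not kernel APIs, and the driver is single-threaded for simplicity.

/*
 * Hypothetical userspace sketch (not kernel code) of the batching scheme:
 * each CPU accumulates charges in a private reserve and folds them into
 * the shared counter only once the reserve crosses the threshold in
 * either direction, mirroring what proto_memory_pcpu_drain() does above.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_long memory_allocated;	/* stand-in for proto->memory_allocated */
static long per_cpu_fw_alloc[NR_CPUS];	/* stand-in for *proto->per_cpu_fw_alloc */
static long mem_pcpu_rsv = 256;		/* drain threshold, like sysctl_mem_pcpu_rsv */

static void pcpu_drain(int cpu)
{
	long val = per_cpu_fw_alloc[cpu];

	per_cpu_fw_alloc[cpu] = 0;
	if (val)
		atomic_fetch_add(&memory_allocated, val);
}

/* Charge (pages > 0) or uncharge (pages < 0) against one CPU's reserve. */
static void pcpu_charge(int cpu, long pages)
{
	long val = per_cpu_fw_alloc[cpu] += pages;

	if (val >= mem_pcpu_rsv || val <= -mem_pcpu_rsv)
		pcpu_drain(cpu);
}

int main(void)
{
	/* Mostly charges, with an occasional uncharge, spread over the CPUs. */
	for (int i = 0; i < 10000; i++)
		pcpu_charge(i % NR_CPUS, (i % 3) ? 1 : -1);

	printf("shared counter: %ld\n", atomic_load(&memory_allocated));
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d reserve : %ld\n", cpu, per_cpu_fw_alloc[cpu]);
	return 0;
}

The point of the pattern is that the hot add/sub paths touch only a per-CPU variable; the shared atomic is updated at most once per threshold's worth of pages, and the threshold bounds how much accounting can sit unreported in per-CPU reserves at any time.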