author    Ingo Molnar <mingo@kernel.org>  2022-02-17 10:13:19 +0100
committer Ingo Molnar <mingo@kernel.org>  2022-03-15 12:57:34 +0100
commit    289578527470b8575a5dcbc3782d8d88c9c2b574 (patch)
tree      f8698f400e5bf6b6d44a25ae06bf9e6a702d6c9d
parent    1e71fc90c48cabbc63d5410b15c7254b91efbf3d (diff)
headers/deps: net: Introduce <net/sock_api_extra.h> for rarely used APIs
Move the sk*backlog() functionality over there - it's only used in ~25
files, while <net/sock_api.h> is included in over 1,000 files on a
typical distro kernel build.

The motivation is to decouple <net/sock_api.h> from <net/dst_api.h>
some more, paving the way for more header dependency reductions.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
 include/net/sock_api.h       | 63 ------------------------------------
 include/net/sock_api_extra.h | 71 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 70 insertions(+), 64 deletions(-)
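
[ Editor's note: for context, a hypothetical sketch of how one of the
  ~25 affected call sites would adjust its includes after this change.
  The call site itself is an assumption for illustration, not a file
  touched by this patch: code that previously got the backlog helpers
  transitively via <net/sock_api.h> now names the new header explicitly.

	/* Hypothetical protocol file using the backlog API: */
	#include <net/sock_api.h>	/* core socket API, still included almost everywhere */
	#include <net/sock_api_extra.h>	/* sk_add_backlog() and friends now live here */

  Since <net/sock_api_extra.h> itself includes <net/sock_api.h> (see
  the diff below), such a call site could also drop the first include. ]
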
diff --git a/include/net/sock_api.h b/include/net/sock_api.h
index 4386fc80c43902..d78bd655ddff49 100644
--- a/include/net/sock_api.h
+++ b/include/net/sock_api.h
@@ -474,69 +474,6 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
void sk_stream_write_space(struct sock *sk);
-/* OOB backlog add */
-static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
-{
- /* dont let skb dst not refcounted, we are going to leave rcu lock */
- skb_dst_force(skb);
-
- if (!sk->sk_backlog.tail)
- WRITE_ONCE(sk->sk_backlog.head, skb);
- else
- sk->sk_backlog.tail->next = skb;
-
- WRITE_ONCE(sk->sk_backlog.tail, skb);
- skb->next = NULL;
-}
-
-/*
- * Take into account size of receive queue and backlog queue
- * Do not take into account this skb truesize,
- * to allow even a single big packet to come.
- */
-static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
-{
- unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
-
- return qsize > limit;
-}
-
-/* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
- unsigned int limit)
-{
- if (sk_rcvqueues_full(sk, limit))
- return -ENOBUFS;
-
- /*
- * If the skb was allocated from pfmemalloc reserves, only
- * allow SOCK_MEMALLOC sockets to use it as this socket is
- * helping free memory
- */
- if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
- return -ENOMEM;
-
- __sk_add_backlog(sk, skb);
- sk->sk_backlog.len += skb->truesize;
- return 0;
-}
-
-int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
-INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
-
-static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
-{
- if (sk_memalloc_socks() && skb_pfmemalloc(skb))
- return __sk_backlog_rcv(sk, skb);
-
- return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
- tcp_v6_do_rcv,
- tcp_v4_do_rcv,
- sk, skb);
-}
-
static inline void sk_incoming_cpu_update(struct sock *sk)
{
int cpu = raw_smp_processor_id();
diff --git a/include/net/sock_api_extra.h b/include/net/sock_api_extra.h
index f7e055489e6700..349210c624f671 100644
--- a/include/net/sock_api_extra.h
+++ b/include/net/sock_api_extra.h
@@ -1 +1,70 @@
-#include <net/sock.h>
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _SOCK_API_EXTRA_H
+#define _SOCK_API_EXTRA_H
+
+#include <net/sock_api.h>
+
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ /* dont let skb dst not refcounted, we are going to leave rcu lock */
+ skb_dst_force(skb);
+
+ if (!sk->sk_backlog.tail)
+ WRITE_ONCE(sk->sk_backlog.head, skb);
+ else
+ sk->sk_backlog.tail->next = skb;
+
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
+ skb->next = NULL;
+}
+
+/*
+ * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
+{
+ unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+ return qsize > limit;
+}
+
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+ unsigned int limit)
+{
+ if (sk_rcvqueues_full(sk, limit))
+ return -ENOBUFS;
+
+ /*
+ * If the skb was allocated from pfmemalloc reserves, only
+ * allow SOCK_MEMALLOC sockets to use it as this socket is
+ * helping free memory
+ */
+ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+ return -ENOMEM;
+
+ __sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+}
+
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
+static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ return __sk_backlog_rcv(sk, skb);
+
+ return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
+}
+
+#endif /* _SOCK_API_EXTRA_H */
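
[ Editor's note: as a usage sketch, here is a minimal, hypothetical
  receive path built on the moved API, loosely modeled on the pattern
  protocols such as TCP use. The function name and the choice of
  sk->sk_rcvbuf as the backlog limit are illustrative assumptions, not
  part of this patch; real protocols pick their own limits and error
  handling.

	#include <net/sock_api_extra.h>

	static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
	{
		int ret = 0;

		/* Per-socket spinlock, as sk_add_backlog() requires: */
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			/* No user context owns the socket: process directly. */
			ret = sk_backlog_rcv(sk, skb);
		} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
			/* Queue full (-ENOBUFS) or pfmemalloc skb on a
			 * non-SOCK_MEMALLOC socket (-ENOMEM): drop it. */
			kfree_skb(skb);
			ret = -ENOBUFS;
		}
		bh_unlock_sock(sk);

		return ret;
	}

  Note the __must_check on sk_add_backlog(): callers must handle the
  failure case, typically by dropping the skb as above. ]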