author     Martin KaFai Lau <martin.lau@kernel.org>  2023-03-29 13:10:56 -0700
committer  Martin KaFai Lau <martin.lau@kernel.org>  2023-03-29 13:10:56 -0700
commit     8b52cc2a2fef541d605debb7efc7dc1bae7b2876 (patch)
tree       e16331d33a1dab203c847b830d862e3a3a09a559
parent     d8d8b008629ffd69f1e204010cb3299bb633877e (diff)
parent     4239561b69feb94e52e43d93685cc46fb9dbcae5 (diff)
Merge branch 'Allow BPF TCP CCs to write app_limited'
Yixin Shen says:

====================
This series allows BPF TCP CCs to write app_limited of struct tcp_sock.
A built-in CC or one from a kernel module is already able to write to
app_limited of struct tcp_sock; until now, a BPF CC has not had write
access to this member.

v2:
 - Merge the test of writing app_limited into the test of writing
   sk_pacing. (Martin KaFai Lau)
====================

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
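For context, a struct_ops CC such as the selftest touched below is registered from user space with libbpf and then selected per socket through the TCP_CONGESTION sockopt. A minimal sketch of that flow follows; the skeleton header, the struct_ops map name write_sk_pacing, and the CC name "bpf_w_sk_pacing" are assumptions taken from the selftest rather than from this merge, and error handling is trimmed.

/* Sketch only: names of the skeleton, struct_ops map and CC are assumed. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <bpf/libbpf.h>
#include "tcp_ca_write_sk_pacing.skel.h"	/* from bpftool gen skeleton */

int main(void)
{
	struct tcp_ca_write_sk_pacing *skel;
	const char ca[] = "bpf_w_sk_pacing";	/* assumed .name of the struct_ops */
	struct bpf_link *link;
	int fd;

	skel = tcp_ca_write_sk_pacing__open_and_load();
	if (!skel)
		return 1;

	/* Registers the BPF congestion control with the kernel. */
	link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
	if (!link)
		return 1;

	/* Any TCP socket can now opt in to the BPF CC by name. */
	fd = socket(AF_INET, SOCK_STREAM, 0);
	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));

	/* ... use the socket ... */
	bpf_link__destroy(link);
	tcp_ca_write_sk_pacing__destroy(skel);
	return 0;
}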
 net/ipv4/bpf_tcp_ca.c                                       |  3 +++
 tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c | 13 ++++++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index e8b27826283ead..ea21c96c03aa13 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -113,6 +113,9 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
case offsetof(struct tcp_sock, ecn_flags):
end = offsetofend(struct tcp_sock, ecn_flags);
break;
+ case offsetof(struct tcp_sock, app_limited):
+ end = offsetofend(struct tcp_sock, app_limited);
+ break;
default:
bpf_log(log, "no write support to tcp_sock at off %d\n", off);
return -EACCES;
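The switch above is an allow-list: a write from a BPF CC to struct tcp_sock is accepted only when the target offset falls within one of the listed fields, and anything else is rejected with -EACCES at verification time. As an illustration (not part of this series), a cong_control callback that stores to a field outside the list, lost_out being assumed here as such a field, fails to load:

/* Illustration only; tcp_sk() is the cast helper the selftests use
 * (bpf_tcp_helpers.h), and lost_out is assumed not to be on the allow-list.
 */
SEC("struct_ops/bad_cong_control")
void BPF_PROG(bad_cong_control, struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->snd_cwnd = 10;	/* allowed: snd_cwnd is on the allow-list */
	tp->lost_out = 0;	/* rejected: the verifier logs
				 * "no write support to tcp_sock at off ..."
				 * and the program load fails with -EACCES
				 */
}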
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
index 43447704cf0e61..0724a79cec786b 100644
--- a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
@@ -16,6 +16,16 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk)
return (struct tcp_sock *)sk;
}
+static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
+{
+ return tp->sacked_out + tp->lost_out;
+}
+
+static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+{
+ return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
+}
+
SEC("struct_ops/write_sk_pacing_init")
void BPF_PROG(write_sk_pacing_init, struct sock *sk)
{
@@ -31,11 +41,12 @@ SEC("struct_ops/write_sk_pacing_cong_control")
void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
const struct rate_sample *rs)
{
- const struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
unsigned long rate =
((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
(tp->srtt_us ?: 1U << 3);
sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+ tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
}
SEC("struct_ops/write_sk_pacing_ssthresh")