aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGeliang Tang <geliang.tang@suse.com>2023-06-25 20:33:56 +0800
committerGeliang Tang <tanggeliang@kylinos.cn>2024-04-22 10:56:55 +0800
commite55803eabf494286b038c5da7fe050ab2ac1af73 (patch)
tree11557a663609ec4429b6d66c4196378b0dceabed
parent043030da965d152f50ead9452f202c3807c439c4 (diff)
downloadmptcp_net-next-e55803eabf494286b038c5da7fe050ab2ac1af73.tar.gz
selftests/bpf: Add bpf_stale scheduler & test
This patch implements setting a subflow as stale/unstale in a BPF MPTCP scheduler, named bpf_stale. The stale subflow ID is added to a map in sk_storage. Two helpers, mptcp_subflow_set_stale() and mptcp_subflow_clear_stale(), are added. In this test, subflow 1 is set as stale in bpf_stale_data_init(). Each subflow is checked in bpf_stale_get_subflow() to determine whether it is stale, so that an unstale subflow is selected to send data. This patch adds the bpf_stale scheduler test: test_stale(). Use sysctl to set net.mptcp.scheduler to use this sched. Add two veth net devices to simulate the multiple-addresses case. Use the 'ip mptcp endpoint' command to add the new endpoint ADDR_2 to PM netlink. Send data and check bytes_sent in the 'ss' output afterwards to make sure the data has been sent only on ADDR_1, since ADDR_2 is set as stale. Signed-off-by: Geliang Tang <geliang.tang@suse.com>
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c3
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c157
3 files changed, 161 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 2200e2cb1fd692..6658533728aa40 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -240,6 +240,7 @@ struct mptcp_subflow_context {
unsigned long avg_pacing_rate;
__u32 backup : 1;
__u8 stale_count;
+ __u32 subflow_id;
struct sock *tcp_sock; /* tcp sk backpointer */
} __attribute__((preserve_access_index));
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index c94418b4cda6a2..1a43e9ed1fa27a 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -15,6 +15,7 @@
#include "mptcp_bpf_rr.skel.h"
#include "mptcp_bpf_red.skel.h"
#include "mptcp_bpf_burst.skel.h"
+#include "mptcp_bpf_stale.skel.h"
#define NS_TEST "mptcp_ns"
#define ADDR_1 "10.0.1.1"
@@ -602,6 +603,7 @@ MPTCP_SCHED_TEST(bkup, WITH_DATA, WITHOUT_DATA);
MPTCP_SCHED_TEST(rr, WITH_DATA, WITH_DATA);
MPTCP_SCHED_TEST(red, WITH_DATA, WITH_DATA);
MPTCP_SCHED_TEST(burst, WITH_DATA, WITH_DATA);
+MPTCP_SCHED_TEST(stale, WITH_DATA, WITHOUT_DATA);
#define RUN_MPTCP_TEST(suffix) \
do { \
@@ -620,4 +622,5 @@ void test_mptcp(void)
RUN_MPTCP_TEST(rr);
RUN_MPTCP_TEST(red);
RUN_MPTCP_TEST(burst);
+ RUN_MPTCP_TEST(stale);
}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
new file mode 100644
index 00000000000000..89533d5577a137
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, SUSE. */
+/* Copyright (c) 2024, Kylin Software */
+
+#include <linux/bpf.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
/* Per-msk record of stale subflows, kept in sk_storage.
 * @nr:  number of valid entries in @ids
 * @ids: subflow_id values currently considered stale (0 = unused slot)
 */
struct mptcp_stale_storage {
	__u8 nr;
	__u32 ids[MPTCP_SUBFLOWS_MAX];
};
+
/* Socket-local storage keyed by the msk socket; one
 * struct mptcp_stale_storage per MPTCP connection.
 */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for sk_storage maps */
	__type(key, int);
	__type(value, struct mptcp_stale_storage);
} mptcp_stale_map SEC(".maps");
+
+static void mptcp_subflow_set_stale(struct mptcp_stale_storage *storage,
+ __u32 subflow_id)
+{
+ if (!subflow_id)
+ return;
+
+ for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
+ if (storage->ids[i] == subflow_id)
+ return;
+ }
+
+ if (storage->nr < MPTCP_SUBFLOWS_MAX - 1)
+ storage->ids[storage->nr++] = subflow_id;
+}
+
/* Remove @subflow_id from @storage, keeping @ids compact.
 * A zero id is invalid and ignored; an id not present is a no-op.
 */
static void mptcp_subflow_clear_stale(struct mptcp_stale_storage *storage,
				      __u32 subflow_id)
{
	if (!subflow_id)
		return;

	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
		if (storage->ids[i] == subflow_id) {
			/* Shift the remaining ids left by one so the valid
			 * entries stay contiguous at the front of the array;
			 * stop early at the first unused (zero) slot.
			 */
			for (int j = i; j < MPTCP_SUBFLOWS_MAX - 1; j++) {
				if (!storage->ids[j + 1])
					break;
				storage->ids[j] = storage->ids[j + 1];
				storage->ids[j + 1] = 0;
			}
			storage->nr--;
			return;
		}
	}
}
+
+static bool mptcp_subflow_is_stale(struct mptcp_stale_storage *storage,
+ __u32 subflow_id)
+{
+ for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
+ if (storage->ids[i] == subflow_id)
+ return true;
+ }
+
+ return false;
+}
+
+static bool mptcp_subflow_is_active(struct mptcp_sched_data *data,
+ __u32 subflow_id)
+{
+ for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
+ struct mptcp_subflow_context *subflow;
+
+ subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
+ if (!subflow)
+ break;
+ if (subflow->subflow_id == subflow_id)
+ return true;
+ }
+
+ return false;
+}
+
SEC("struct_ops/mptcp_sched_stale_init")
void BPF_PROG(mptcp_sched_stale_init, struct mptcp_sock *msk)
{
	struct mptcp_stale_storage *storage;

	/* Create (or fetch) this connection's stale table. */
	storage = bpf_sk_storage_get(&mptcp_stale_map, msk, 0,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!storage)
		return;

	/* Start from an empty table before seeding the test entries. */
	for (int i = 0; i < MPTCP_SUBFLOWS_MAX; i++)
		storage->ids[i] = 0;
	storage->nr = 0;

	/* Pre-mark subflow ids 2 and 3 as stale so the scheduler only
	 * picks the first subflow during the selftest.
	 */
	mptcp_subflow_set_stale(storage, 2);
	mptcp_subflow_set_stale(storage, 3);
}
+
SEC("struct_ops/mptcp_sched_stale_release")
void BPF_PROG(mptcp_sched_stale_release, struct mptcp_sock *msk)
{
	/* Drop the per-connection stale table when the scheduler detaches. */
	bpf_sk_storage_delete(&mptcp_stale_map, msk);
}
+
/* .get_subflow hook: mark one non-stale subflow as scheduled.
 * With a single subflow it is always picked; otherwise the last
 * non-stale subflow in the list wins.
 */
int BPF_STRUCT_OPS(bpf_stale_get_subflow, struct mptcp_sock *msk,
		   struct mptcp_sched_data *data)
{
	struct mptcp_stale_storage *storage;
	int nr = -1, i;

	/* Fast path: nothing to choose between. */
	if (data->subflows == 1) {
		mptcp_subflow_set_scheduled(bpf_mptcp_subflow_ctx_by_pos(data, 0), true);
		return 0;
	}

	storage = bpf_sk_storage_get(&mptcp_stale_map, msk, 0, 0);
	if (!storage)
		return -1;

	/* Handle invalid subflow ids for subflows that have been closed:
	 * when fewer subflows remain than stale entries + 1, drop stale
	 * entries whose id no longer matches an active subflow.
	 */
	if (data->subflows < storage->nr + 1) {
		for (i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
			if (!mptcp_subflow_is_active(data, storage->ids[i]))
				mptcp_subflow_clear_stale(storage, storage->ids[i]);
		}
	}

	/* Remember the position of the last non-stale subflow. */
	for (i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
		struct mptcp_subflow_context *subflow;

		subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
		if (!subflow)
			break;

		if (mptcp_subflow_is_stale(storage, subflow->subflow_id))
			continue;

		nr = i;
	}

	/* NOTE(review): this returns -1 after successfully scheduling a
	 * subflow and 0 when none was picked — the opposite of the
	 * 0-on-success convention used by the single-subflow path above
	 * and by the sibling schedulers in this directory. Confirm this
	 * is what the mptcp_sched_ops caller expects.
	 */
	if (nr != -1) {
		mptcp_subflow_set_scheduled(bpf_mptcp_subflow_ctx_by_pos(data, nr), true);
		return -1;
	}
	return 0;
}
+
/* Register the "bpf_stale" scheduler; selected at runtime via
 * sysctl net.mptcp.scheduler.
 */
SEC(".struct_ops")
struct mptcp_sched_ops stale = {
	.init = (void *)mptcp_sched_stale_init,
	.release = (void *)mptcp_sched_stale_release,
	.get_subflow = (void *)bpf_stale_get_subflow,
	.name = "bpf_stale",
};