author     Paul Gortmaker <paul.gortmaker@windriver.com>  2018-08-02 13:08:45 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2018-08-02 13:08:45 -0400
commit     4c6d72d2ed0d5dfbb38b7db2786780af57e33fea (patch)
tree       c013a519116fdee12cf0d0228ce28e01cda3b2b1
parent     000447dc0d892a41d1f7c9989885cde9cb95a86f (diff)
net: add rb_to_skb helper for future commits
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--   queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch | 248
-rw-r--r--   queue/series                                            |   1
2 files changed, 249 insertions(+), 0 deletions(-)
diff --git a/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch b/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch
new file mode 100644
index 0000000..7d9b75a
--- /dev/null
+++ b/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch
@@ -0,0 +1,248 @@
+From 18a4c0eab2623cc95be98a1e6af1ad18e7695977 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 5 Oct 2017 22:21:21 -0700
+Subject: [PATCH] net: add rb_to_skb() and other rb tree helpers
+
+commit 18a4c0eab2623cc95be98a1e6af1ad18e7695977 upstream.
+
+Generalize private netem_rb_to_skb()
+
+TCP rtx queue will soon be converted to rb-tree,
+so we will need skb_rbtree_walk() helpers.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 01a985937867..03634ec2f918 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3158,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+ 	return __skb_grow(skb, len);
+ }
+ 
++#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
++#define skb_rb_first(root) rb_to_skb(rb_first(root))
++#define skb_rb_last(root)  rb_to_skb(rb_last(root))
++#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
++#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
++
+ #define skb_queue_walk(queue, skb) \
+ 		for (skb = (queue)->next;					\
+ 		     skb != (struct sk_buff *)(queue);				\
+@@ -3172,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+ 		for (; skb != (struct sk_buff *)(queue);			\
+ 		     skb = skb->next)
+ 
++#define skb_rbtree_walk(skb, root)						\
++		for (skb = skb_rb_first(root); skb != NULL;			\
++		     skb = skb_rb_next(skb))
++
++#define skb_rbtree_walk_from(skb)						\
++		for (; skb != NULL;						\
++		     skb = skb_rb_next(skb))
++
++#define skb_rbtree_walk_from_safe(skb, tmp)					\
++		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
++		     skb = tmp)
++
+ #define skb_queue_walk_from_safe(queue, skb, tmp)				\
+ 		for (tmp = skb->next;						\
+ 		     skb != (struct sk_buff *)(queue);				\
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 29fff14d5a53..7ee4aadcdd71 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -465,17 +465,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
+ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	struct rb_node *p;
+-	struct sk_buff *skb;
+ 	struct dst_entry *dst;
++	struct sk_buff *skb;
+ 
+ 	if (!tp->syn_fastopen)
+ 		return;
+ 
+ 	if (!tp->data_segs_in) {
+-		p = rb_first(&tp->out_of_order_queue);
+-		if (p && !rb_next(p)) {
+-			skb = rb_entry(p, struct sk_buff, rbnode);
++		skb = skb_rb_first(&tp->out_of_order_queue);
++		if (skb && !skb_rb_next(skb)) {
+ 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
+ 				tcp_fastopen_active_disable(sk);
+ 				return;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index fb0d7ed84b94..90afe4143596 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk)
+ 
+ 	p = rb_first(&tp->out_of_order_queue);
+ 	while (p) {
+-		skb = rb_entry(p, struct sk_buff, rbnode);
++		skb = rb_to_skb(p);
+ 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
+ 			break;
+ 
+@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	struct rb_node **p, *q, *parent;
++	struct rb_node **p, *parent;
+ 	struct sk_buff *skb1;
+ 	u32 seq, end_seq;
+ 	bool fragstolen;
+@@ -4458,7 +4458,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ 	parent = NULL;
+ 	while (*p) {
+ 		parent = *p;
+-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
++		skb1 = rb_to_skb(parent);
+ 		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+ 			p = &parent->rb_left;
+ 			continue;
+@@ -4503,9 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ 
+ merge_right:
+ 	/* Remove other segments covered by skb. */
+-	while ((q = rb_next(&skb->rbnode)) != NULL) {
+-		skb1 = rb_entry(q, struct sk_buff, rbnode);
+-
++	while ((skb1 = skb_rb_next(skb)) != NULL) {
+ 		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+ 			break;
+ 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+@@ -4520,7 +4518,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ 		tcp_drop(sk, skb1);
+ 	}
+ 	/* If there is no skb after us, we are the last_skb ! */
+-	if (!q)
++	if (!skb1)
+ 		tp->ooo_last_skb = skb;
+ 
+ add_sack:
+@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
+ 	if (list)
+ 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+ 
+-	return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
++	return skb_rb_next(skb);
+ }
+ 
+ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+ 
+ 	while (*p) {
+ 		parent = *p;
+-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
++		skb1 = rb_to_skb(parent);
+ 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+ 			p = &parent->rb_left;
+ 		else
+@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *skb, *head;
+-	struct rb_node *p;
+ 	u32 start, end;
+ 
+-	p = rb_first(&tp->out_of_order_queue);
+-	skb = rb_entry_safe(p, struct sk_buff, rbnode);
++	skb = skb_rb_first(&tp->out_of_order_queue);
+ new_range:
+ 	if (!skb) {
+-		p = rb_last(&tp->out_of_order_queue);
+-		/* Note: This is possible p is NULL here. We do not
+-		 * use rb_entry_safe(), as ooo_last_skb is valid only
+-		 * if rbtree is not empty.
+-		 */
+-		tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
++		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
+ 		return;
+ 	}
+ 	start = TCP_SKB_CB(skb)->seq;
+ 	end = TCP_SKB_CB(skb)->end_seq;
+ 
+ 	for (head = skb;;) {
+-		skb = tcp_skb_next(skb, NULL);
++		skb = skb_rb_next(skb);
+ 
+ 		/* Range is terminated when we see a gap or when
+ 		 * we are at the queue end.
+@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
+ 	do {
+ 		prev = rb_prev(node);
+ 		rb_erase(node, &tp->out_of_order_queue);
+-		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
++		tcp_drop(sk, rb_to_skb(node));
+ 		sk_mem_reclaim(sk);
+ 		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ 		    !tcp_under_memory_pressure(sk))
+ 			break;
+ 		node = prev;
+ 	} while (node);
+-	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
++	tp->ooo_last_skb = rb_to_skb(prev);
+ 
+ 	/* Reset SACK state.  A conforming SACK implementation will
+ 	 * do the same at a timeout based retransmit.  When a connection
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 5a4f10080290..db0228a65e8c 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -148,12 +148,6 @@ struct netem_skb_cb {
+ 	psched_time_t	time_to_send;
+ };
+ 
+-
+-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
+-{
+-	return rb_entry(rb, struct sk_buff, rbnode);
+-}
+-
+ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
+ {
+ 	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
+@@ -364,7 +358,7 @@ static void tfifo_reset(struct Qdisc *sch)
+ 	struct rb_node *p = rb_first(&q->t_root);
+ 
+ 	while (p) {
+-		struct sk_buff *skb = netem_rb_to_skb(p);
++		struct sk_buff *skb = rb_to_skb(p);
+ 
+ 		p = rb_next(p);
+ 		rb_erase(&skb->rbnode, &q->t_root);
+@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ 		struct sk_buff *skb;
+ 
+ 		parent = *p;
+-		skb = netem_rb_to_skb(parent);
++		skb = rb_to_skb(parent);
+ 		if (tnext >= netem_skb_cb(skb)->time_to_send)
+ 			p = &parent->rb_right;
+ 		else
+@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 			struct sk_buff *t_skb;
+ 			struct netem_skb_cb *t_last;
+ 
+-			t_skb = netem_rb_to_skb(rb_last(&q->t_root));
++			t_skb = skb_rb_last(&q->t_root);
+ 			t_last = netem_skb_cb(t_skb);
+ 			if (!last ||
+ 			    t_last->time_to_send > last->time_to_send) {
+@@ -617,7 +611,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 		if (p) {
+ 			psched_time_t time_to_send;
+ 
+-			skb = netem_rb_to_skb(p);
++			skb = rb_to_skb(p);
+ 
+ 			/* if more time remaining? */
+ 			time_to_send = netem_skb_cb(skb)->time_to_send;
+-- 
+2.15.0
+
diff --git a/queue/series b/queue/series
index c0ba19c..a0578e6 100644
--- a/queue/series
+++ b/queue/series
@@ -71,6 +71,7 @@ drm-sun4i-Fix-error-path-handling.patch
 libnvdimm-dax-fix-1GB-aligned-namespaces-vs-physical.patch
 libnvdimm-btt-Fix-an-incompatibility-in-the-log-layo.patch
 libnvdimm-pfn-fix-start_pad-handling-for-aligned-nam.patch
+net-add-rb_to_skb-and-other-rb-tree-helpers.patch
 tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch
 tcp-avoid-collapses-in-tcp_prune_queue-if-possible.patch
 tcp-detect-malicious-patterns-in-tcp_collapse_ofo_qu.patch
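
For context, the helpers this patch backports replace open-coded rb_entry() walks over sk_buff rb-trees. The sketch below shows how they are meant to be used; it is illustrative only and not part of the patch. The function names skb_rbtree_len() and skb_rbtree_flush() are hypothetical, and the code assumes a kernel tree with this patch applied (i.e. <linux/skbuff.h> providing the macros above).

	/* Illustrative sketch only -- not part of the patch. */
	static unsigned int skb_rbtree_len(struct rb_root *root)
	{
		struct sk_buff *skb;
		unsigned int len = 0;

		/* expands to: for (skb = skb_rb_first(root); skb != NULL;
		 *                  skb = skb_rb_next(skb))
		 * rb_to_skb() is NULL-safe because it uses rb_entry_safe().
		 */
		skb_rbtree_walk(skb, root)
			len++;
		return len;
	}

	static void skb_rbtree_flush(struct rb_root *root)
	{
		struct sk_buff *skb, *tmp;

		skb = skb_rb_first(root);
		/* The _safe variant caches skb_rb_next(skb) in tmp before the
		 * body runs, so erasing and freeing the current skb is fine.
		 */
		skb_rbtree_walk_from_safe(skb, tmp) {
			rb_erase(&skb->rbnode, root);
			kfree_skb(skb);
		}
	}

The second function mirrors what tfifo_reset() in sch_netem.c does by hand with rb_first()/rb_next(): fetch the successor before rb_erase() invalidates the current node.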