author     Paul Gortmaker <paul.gortmaker@windriver.com>   2018-08-02 13:13:21 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>   2018-08-02 13:13:21 -0400
commit     b4e38d7eb5b4ab308ff4061333d9d49e9ffbe983 (patch)
tree       dee94cbc3c2d9fb6810d651381535e4141a972b9
parent     4c6d72d2ed0d5dfbb38b7db2786780af57e33fea (diff)
download   longterm-queue-4.12-b4e38d7eb5b4ab308ff4061333d9d49e9ffbe983.tar.gz
net: refresh context
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch  |  51
1 file changed, 26 insertions(+), 25 deletions(-)
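This is a context refresh only: the queued backport of upstream commit
18a4c0eab262 ("net: add rb_to_skb() and other rb tree helpers") is
regenerated against the 4.12 longterm tree, so only the patch's index
lines, hunk offsets and surrounding context change; the functional
payload is untouched. For reference, the helpers the queued patch adds
to include/linux/skbuff.h look like this (a sketch based on the
mainline commit; the backported text may differ in minor detail):

    /* Map an rb_node embedded in an sk_buff back to the sk_buff.
     * rb_entry_safe() yields NULL for a NULL node pointer, so all
     * of these helpers are NULL-safe.
     */
    #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)

    #define skb_rb_first(root) rb_to_skb(rb_first(root))
    #define skb_rb_last(root)  rb_to_skb(rb_last(root))
    #define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
    #define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

    /* In-order walk of an skb rb tree, mirroring skb_queue_walk() */
    #define skb_rbtree_walk(skb, root) \
            for (skb = skb_rb_first(root); skb != NULL; \
                 skb = skb_rb_next(skb))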
diff --git a/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch b/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch
index 7d9b75a..abfdcef 100644
--- a/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch
+++ b/queue/net-add-rb_to_skb-and-other-rb-tree-helpers.patch
@@ -1,4 +1,4 @@
-From 18a4c0eab2623cc95be98a1e6af1ad18e7695977 Mon Sep 17 00:00:00 2001
+From d6a7b11b874d61f7bf35dfec18b24490ccedd491 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Thu, 5 Oct 2017 22:21:21 -0700
Subject: [PATCH] net: add rb_to_skb() and other rb tree helpers
@@ -12,12 +12,13 @@ so we will need skb_rbtree_walk() helpers.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index 01a985937867..03634ec2f918 100644
+index 8760c3f604ab..f9dcb6f70887 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -3158,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+@@ -3006,6 +3006,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
return __skb_grow(skb, len);
}
@@ -30,7 +31,7 @@ index 01a985937867..03634ec2f918 100644
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
-@@ -3172,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+@@ -3020,6 +3026,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
@@ -50,10 +51,10 @@ index 01a985937867..03634ec2f918 100644
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
-index 29fff14d5a53..7ee4aadcdd71 100644
+index 4af82b914dd4..4c9945cf6b10 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
-@@ -465,17 +465,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
+@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -75,10 +76,10 @@ index 29fff14d5a53..7ee4aadcdd71 100644
tcp_fastopen_active_disable(sk);
return;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index fb0d7ed84b94..90afe4143596 100644
+index c37915f00a51..54d846d007f8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
-@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk)
+@@ -4314,7 +4314,7 @@ static void tcp_ofo_queue(struct sock *sk)
p = rb_first(&tp->out_of_order_queue);
while (p) {
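In the tcp_fastopen.c hunks above, the patch replaces an open-coded
rb_first()/rb_next()/rb_entry() sequence used to test whether the
out-of-order queue holds exactly one skb. Schematically (a sketch of
the conversion, not the exact queued text):

    /* before: reason about rb_node pointers */
    struct rb_node *p = rb_first(&tp->out_of_order_queue);
    if (p && !rb_next(p)) {
            struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
            /* inspect the lone out-of-order skb */
    }

    /* after: the same test in sk_buff terms */
    struct sk_buff *skb = skb_rb_first(&tp->out_of_order_queue);
    if (skb && !skb_rb_next(skb)) {
            /* inspect the lone out-of-order skb */
    }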
@@ -87,7 +88,7 @@ index fb0d7ed84b94..90afe4143596 100644
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
-@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+@@ -4378,7 +4378,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -96,7 +97,7 @@ index fb0d7ed84b94..90afe4143596 100644
struct sk_buff *skb1;
u32 seq, end_seq;
bool fragstolen;
-@@ -4458,7 +4458,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+@@ -4436,7 +4436,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
parent = NULL;
while (*p) {
parent = *p;
@@ -105,7 +106,7 @@ index fb0d7ed84b94..90afe4143596 100644
if (before(seq, TCP_SKB_CB(skb1)->seq)) {
p = &parent->rb_left;
continue;
-@@ -4503,9 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+@@ -4480,9 +4480,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
merge_right:
/* Remove other segments covered by skb. */
@@ -116,7 +117,7 @@ index fb0d7ed84b94..90afe4143596 100644
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-@@ -4520,7 +4518,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+@@ -4497,7 +4495,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
tcp_drop(sk, skb1);
}
/* If there is no skb after us, we are the last_skb ! */
@@ -125,7 +126,7 @@ index fb0d7ed84b94..90afe4143596 100644
tp->ooo_last_skb = skb;
add_sack:
-@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
+@@ -4701,7 +4699,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
if (list)
return !skb_queue_is_last(list, skb) ? skb->next : NULL;
@@ -134,7 +135,7 @@ index fb0d7ed84b94..90afe4143596 100644
}
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
-@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+@@ -4730,7 +4728,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
while (*p) {
parent = *p;
@@ -143,7 +144,7 @@ index fb0d7ed84b94..90afe4143596 100644
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
p = &parent->rb_left;
else
-@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
+@@ -4849,26 +4847,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb, *head;
@@ -173,7 +174,7 @@ index fb0d7ed84b94..90afe4143596 100644
/* Range is terminated when we see a gap or when
* we are at the queue end.
-@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
+@@ -4911,14 +4902,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
do {
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
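The tcp_input.c hunks, here and continuing below, apply the same
substitution throughout the TCP out-of-order queue code:
rb_entry(..., struct sk_buff, rbnode) becomes rb_to_skb(), and
tcp_collapse_ofo_queue() drops its explicit rb_node cursor in favor of
the skb-level iterators. The recurring pattern, sketched (consume() is
a hypothetical stand-in for the real per-skb work):

    /* before: explicit rb_node cursor */
    struct rb_node *p = rb_first(&tp->out_of_order_queue);
    while (p) {
            struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
            p = rb_next(p);
            consume(skb);
    }

    /* after: iterate in sk_buff terms; for walks that erase nodes,
     * the upstream commit also provides skb_rbtree_walk_from_safe()
     */
    struct sk_buff *skb;
    skb_rbtree_walk(skb, &tp->out_of_order_queue)
            consume(skb);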
@@ -191,11 +192,11 @@ index fb0d7ed84b94..90afe4143596 100644
/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
-index 5a4f10080290..db0228a65e8c 100644
+index 1b3dd6190e93..efc3cc279622 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
-@@ -148,12 +148,6 @@ struct netem_skb_cb {
- psched_time_t time_to_send;
+@@ -149,12 +149,6 @@ struct netem_skb_cb {
+ ktime_t tstamp_save;
};
-
@@ -207,15 +208,15 @@ index 5a4f10080290..db0228a65e8c 100644
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
/* we assume we can use skb next/prev/tstamp as storage for rb_node */
-@@ -364,7 +358,7 @@ static void tfifo_reset(struct Qdisc *sch)
- struct rb_node *p = rb_first(&q->t_root);
+@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
+ struct rb_node *p;
- while (p) {
+ while ((p = rb_first(&q->t_root))) {
- struct sk_buff *skb = netem_rb_to_skb(p);
+ struct sk_buff *skb = rb_to_skb(p);
- p = rb_next(p);
- rb_erase(&skb->rbnode, &q->t_root);
+ rb_erase(p, &q->t_root);
+ rtnl_kfree_skbs(skb, skb);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
struct sk_buff *skb;
@@ -234,7 +235,7 @@ index 5a4f10080290..db0228a65e8c 100644
t_last = netem_skb_cb(t_skb);
if (!last ||
t_last->time_to_send > last->time_to_send) {
-@@ -617,7 +611,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+@@ -618,7 +612,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (p) {
psched_time_t time_to_send;
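The sch_netem.c portion removes the scheduler's private wrapper in
favor of the shared helper; per the mainline commit, the deleted
function was along the lines of:

    /* netem's private copy, now redundant */
    static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
    {
            return rb_entry(rb, struct sk_buff, rbnode);
    }

One nuance worth noting: the shared rb_to_skb() is built on
rb_entry_safe() and therefore returns NULL for a NULL node, whereas
the open-coded rb_entry() did not. Callers such as the refreshed
tfifo_reset() drain loop above already test the node pointer before
converting it, so the substitution does not change behavior.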