From 90667dded1be0045652680c95a2ec1ed384c1afb Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:53 -0500
Subject: [PATCH] net: Convert netfilter to percpu_locked

commit 75f82937350a5e223e3887c0343446f83d19bdae in tip.

Allows that code to be preemptible.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 net/core/flow.c                 |   22 ++++++++++++++--------
 net/ipv4/netfilter/arp_tables.c |    4 ++--
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/net/core/flow.c b/net/core/flow.c
index 9601587..f032d1c 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -39,9 +39,10 @@ atomic_t flow_cache_genid = ATOMIC_INIT(0);
 
 static u32 flow_hash_shift;
 #define flow_hash_size	(1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
+static DEFINE_PER_CPU_LOCKED(struct flow_cache_entry **, flow_tables);
+
+#define flow_table(cpu) (per_cpu_var_locked(flow_tables, cpu))
 
 static struct kmem_cache *flow_cachep __read_mostly;
 
@@ -168,24 +169,24 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 			flow_resolve_t resolver)
 {
-	struct flow_cache_entry *fle, **head;
+	struct flow_cache_entry **table, *fle, **head;
 	unsigned int hash;
 	int cpu;
 
 	local_bh_disable();
-	cpu = smp_processor_id();
+	table = get_cpu_var_locked(flow_tables, &cpu);
 
 	fle = NULL;
 	/* Packet really early in init?  Making flow_cache_init a
 	 * pre-smp initcall would solve this.  --RR */
-	if (!flow_table(cpu))
+	if (!table)
 		goto nocache;
 
 	if (flow_hash_rnd_recalc(cpu))
 		flow_new_hash_rnd(cpu);
 	hash = flow_hash_code(key, cpu);
 
-	head = &flow_table(cpu)[hash];
+	head = &table[hash];
 	for (fle = *head; fle; fle = fle->next) {
 		if (fle->family == family &&
 		    fle->dir == dir &&
@@ -195,6 +196,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 
 				if (ret)
 					atomic_inc(fle->object_ref);
+				put_cpu_var_locked(flow_tables, cpu);
 				local_bh_enable();
 
 				return ret;
@@ -220,6 +222,8 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 	}
 
 nocache:
+	put_cpu_var_locked(flow_tables, cpu);
+
 	{
 		int err;
 		void *obj;
@@ -249,14 +253,15 @@ nocache:
 static void flow_cache_flush_tasklet(unsigned long data)
 {
 	struct flow_flush_info *info = (void *)data;
+	struct flow_cache_entry **table;
 	int i;
 	int cpu;
 
-	cpu = smp_processor_id();
+	table = get_cpu_var_locked(flow_tables, &cpu);
 	for (i = 0; i < flow_hash_size; i++) {
 		struct flow_cache_entry *fle;
 
-		fle = flow_table(cpu)[i];
+		fle = table[i];
 		for (; fle; fle = fle->next) {
 			unsigned genid = atomic_read(&flow_cache_genid);
 
@@ -267,6 +272,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
 			atomic_dec(fle->object_ref);
 		}
 	}
+	put_cpu_var_locked(flow_tables, cpu);
 
 	if (atomic_dec_and_test(&info->cpuleft))
 		complete(&info->completion);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f07d77f..f3c60dc 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -275,7 +275,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
 	xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[smp_processor_id()];
+	table_base = private->entries[raw_smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -1187,7 +1187,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 
 	i = 0;
 	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
+	curcpu = raw_smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
 	xt_info_wrlock(curcpu);
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-- 
1.7.0.4
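
For readers outside the -rt patch set: the conversion above swaps implicit
per-CPU exclusion (preemption/BH disabling plus smp_processor_id()) for an
explicit lock carried by each per-CPU slot, taken with get_cpu_var_locked()
and dropped with put_cpu_var_locked(). A task that gets preempted while
holding the slot is harmless, because any other task landing on the same CPU
blocks on the lock instead of seeing half-updated data. Below is a minimal
userspace sketch of that shape using pthreads; the struct, function, and
variable names (locked_slot, get_slot_locked, put_slot_locked) are invented
for illustration and are not the kernel API.

/*
 * Userspace sketch of the -rt "locked per-CPU variable" pattern, NOT the
 * kernel API: it only models the shape of DEFINE_PER_CPU_LOCKED /
 * get_cpu_var_locked() / put_cpu_var_locked() with pthread mutexes.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_CPUS 4

struct locked_slot {
	pthread_mutex_t lock;	/* replaces "preemption disabled = exclusion" */
	long data;		/* stands in for flow_table(cpu) */
};

static struct locked_slot slots[NR_CPUS];

/*
 * Analogue of get_cpu_var_locked(): pick "our" CPU's slot and take its
 * lock.  The holder may now be preempted, or even migrate to another CPU,
 * without risking the data: exclusion comes from the lock, not from
 * staying on the CPU.
 */
static long *get_slot_locked(int *cpu)
{
	int c = sched_getcpu();		/* glibc; returns -1 on error */

	if (c < 0)
		c = 0;
	*cpu = c % NR_CPUS;
	pthread_mutex_lock(&slots[*cpu].lock);
	return &slots[*cpu].data;
}

/* Analogue of put_cpu_var_locked(): release the slot taken above. */
static void put_slot_locked(int cpu)
{
	pthread_mutex_unlock(&slots[cpu].lock);
}

int main(void)
{
	int i, cpu;
	long *data, seen;

	for (i = 0; i < NR_CPUS; i++)
		pthread_mutex_init(&slots[i].lock, NULL);

	data = get_slot_locked(&cpu);
	seen = ++(*data);	/* safe while the slot lock is held */
	put_slot_locked(cpu);

	printf("cpu %d slot incremented to %ld\n", cpu, seen);
	return 0;
}

Build with: cc -pthread -o sketch sketch.c. The same reasoning explains the
arp_tables.c hunks: once the section may be preempted, smp_processor_id()
would trip the debug check for preemptible callers, so the patch uses
raw_smp_processor_id() and relies on the xt_info read/write locks for
exclusion instead.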