From 0a572d0dbaf39ecb310c1633dd9d0377d55df6f0 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: [PATCH] percpu: add percpu locked infrastructure

commit 9566018270e0c6cee0388e1a577ae8b6f15f4e8d in tip.

RT needs per-CPU data structures protected by per-CPU locks instead of
by disabling preemption. Add the infrastructure for per-CPU locked data.
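
As a usage illustration (not part of this patch; my_counter is a
made-up example variable), the locked accessors pair up like
get_cpu_var()/put_cpu_var(), but take a per-CPU spinlock instead of
disabling preemption:

	DEFINE_PER_CPU_LOCKED(int, my_counter);

	static void my_counter_inc(void)
	{
		int cpu;

		/* lock this CPU's instance; 'cpu' reports which CPU */
		get_cpu_var_locked(my_counter, &cpu)++;
		put_cpu_var_locked(my_counter, cpu);
	}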

[PG: fold in a couple of small deltas from the big 33rt merge commit;
 also note the removal of the per_cpu__ prefix in dd17c8f72993f9461e9c19250]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 include/asm-generic/percpu.h |   17 ++++++++++++++++-
 include/linux/percpu-defs.h  |   20 ++++++++++++++++++++
 include/linux/percpu.h       |   23 +++++++++++++++++++++++
 3 files changed, 59 insertions(+), 1 deletions(-)
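
A sketch of cross-CPU access under this scheme (illustrative only, and
assuming the hypothetical my_counter variable from the example above):
any CPU may take another CPU's lock and operate on its instance, which
plain preempt_disable()-based per-CPU data cannot safely permit:

	int my_counter_sum(void)
	{
		int cpu, sum = 0;

		for_each_possible_cpu(cpu) {
			spin_lock(&__get_cpu_lock(my_counter, cpu));
			sum += per_cpu_var_locked(my_counter, cpu);
			spin_unlock(&__get_cpu_lock(my_counter, cpu));
		}
		return sum;
	}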

diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2..d1d3488 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -5,6 +5,9 @@
 #include <linux/threads.h>
 #include <linux/percpu-defs.h>
 
+#define __per_cpu_var_lock(var)	per_cpu__lock_##var##_locked
+#define __per_cpu_var_lock_var(var) per_cpu__##var##_locked
+
 #ifdef CONFIG_SMP
 
 /*
@@ -60,10 +63,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __raw_get_cpu_var(var) \
 	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
 
+#define per_cpu_lock(var, cpu) \
+	(*SHIFT_PERCPU_PTR(&__per_cpu_var_lock(var), per_cpu_offset(cpu)))
+#define per_cpu_var_locked(var, cpu) \
+	(*SHIFT_PERCPU_PTR(&__per_cpu_var_lock_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_lock(var, cpu) \
+		per_cpu_lock(var, cpu)
+#define __get_cpu_var_locked(var, cpu) \
+		per_cpu_var_locked(var, cpu)
+
 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 
-
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
 #endif
@@ -71,6 +82,10 @@ extern void setup_per_cpu_areas(void);
 #else /* ! SMP */
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &(var)))
+#define per_cpu_var_locked(var, cpu) \
+	(*((void)(cpu), &__per_cpu_var_lock_var(var)))
+#define __get_cpu_lock(var, cpu)		__per_cpu_var_lock(var)
+#define __get_cpu_var_locked(var, cpu)		__per_cpu_var_lock_var(var)
 #define __get_cpu_var(var)			(var)
 #define __raw_get_cpu_var(var)			(var)
 #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68567c0..0b95d09 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -91,6 +91,22 @@
 	DEFINE_PER_CPU_SECTION(type, name, "")
 
 /*
+ * next three added for RT patch
+ * (wonder if we need corresponding DECLARE_*'s?) (clrkwllms)
+ */
+#define DEFINE_PER_CPU_SPINLOCK(name, sec)				\
+	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
+	__DEFINE_SPINLOCK(per_cpu__lock_##name##_locked)
+
+#define DECLARE_PER_CPU_LOCKED(type, name)				\
+	extern PER_CPU_ATTRIBUTES spinlock_t __per_cpu_var_lock(name);	\
+	extern PER_CPU_ATTRIBUTES __typeof__(type) __per_cpu_var_lock_var(name)
+
+#define DEFINE_PER_CPU_LOCKED(type, name)				\
+	DEFINE_PER_CPU_SPINLOCK(name, "");				\
+	DEFINE_PER_CPU_SECTION(type, name##_locked, "")
+
+/*
  * Declaration/definition used for per-CPU variables that must come first in
  * the set of variables.
  */
@@ -145,10 +161,14 @@
  */
 #ifndef __CHECKER__
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL(var) EXPORT_SYMBOL(var##_locked)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##_locked)
 #else
 #define EXPORT_PER_CPU_SYMBOL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL(var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var)
 #endif
 
 #endif /* _LINUX_PERCPU_DEFS_H */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index d3a38d6..4494bb0 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,6 +39,29 @@
 	preempt_enable();				\
 } while (0)
 
+/*
+ * Per-CPU data structures with an additional lock - useful for
+ * PREEMPT_RT code that wants to reschedule but also wants per-CPU
+ * data structures.
+ *
+ * 'cpu' gets updated with the CPU the task is currently executing on.
+ *
+ * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables are the
+ * same as the normal per-CPU variables, so there is no runtime
+ * overhead.
+ */
+#define get_cpu_var_locked(var, cpuptr)			\
+(*({							\
+	int __cpu = raw_smp_processor_id();		\
+							\
+	*(cpuptr) = __cpu;				\
+	spin_lock(&__get_cpu_lock(var, __cpu));		\
+	&__get_cpu_var_locked(var, __cpu);		\
+}))
+
+#define put_cpu_var_locked(var, cpu) \
+	 do { (void)cpu; spin_unlock(&__get_cpu_lock(var, cpu)); } while (0)
+
 #ifdef CONFIG_SMP
 
 /* minimum unit size, also is the maximum supported allocation size */
-- 
1.7.0.4