From 20220836b053750eb69436535a96978fde58fd3e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jul 2009 19:27:54 +0200
Subject: [PATCH] seqlock: Create raw_seqlock

commit 09e46c7a86b2e81f97bd93f588b62c2d36cff58e in tip.

raw_seqlock_t will be used to annotate seqlocks which cannot be
converted to sleeping locks in preempt-rt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 include/linux/seqlock.h |   86 ++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 82 insertions(+), 4 deletions(-)
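
A minimal usage sketch of the interface added below (not part of the
patch; the lock name, the data, and the helper functions are
hypothetical): a raw_seqlock_t protecting a 64-bit timestamp, with a
plain writer and a lockless, retrying reader.

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_RAW_SEQLOCK(tick_raw_lock);	/* hypothetical lock */
static u64 last_tick_ns;			/* hypothetical data it protects */

static void update_tick(u64 now)
{
	/* Writer side: exclusive, behaves like raw_spin_lock/unlock */
	write_raw_seqlock(&tick_raw_lock);
	last_tick_ns = now;
	write_raw_sequnlock(&tick_raw_lock);
}

static u64 read_tick(void)
{
	unsigned seq;
	u64 ns;

	do {
		/* Reader side: no lock taken; retry if a writer raced */
		seq = read_raw_seqbegin(&tick_raw_lock);
		ns = last_tick_ns;
	} while (read_raw_seqretry(&tick_raw_lock, seq));

	return ns;
}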

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 632205c..0c38f7c 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -31,6 +31,11 @@
 
 typedef struct {
 	unsigned sequence;
+	raw_spinlock_t lock;
+} raw_seqlock_t;
+
+typedef struct {
+	unsigned sequence;
 	spinlock_t lock;
 } seqlock_t;
 
@@ -38,11 +43,23 @@ typedef struct {
  * These macros triggered gcc-3.x compile-time problems.  We think these are
  * OK now.  Be cautious.
  */
+#define __RAW_SEQLOCK_UNLOCKED(lockname) \
+	{ 0, __RAW_SPIN_LOCK_UNLOCKED(lockname) }
+
+#define raw_seqlock_init(x)				\
+	do {						\
+		(x)->sequence = 0;			\
+		raw_spin_lock_init(&(x)->lock);	\
+	} while (0)
+
+#define DEFINE_RAW_SEQLOCK(x) \
+	raw_seqlock_t x = __RAW_SEQLOCK_UNLOCKED(x)
+
 #define __SEQLOCK_UNLOCKED(lockname) \
-		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+	{ 0, __SPIN_LOCK_UNLOCKED(lockname) }
 
 #define SEQLOCK_UNLOCKED \
-		 __SEQLOCK_UNLOCKED(old_style_seqlock_init)
+	__SEQLOCK_UNLOCKED(old_style_seqlock_init)
 
 #define seqlock_init(x)					\
 	do {						\
@@ -51,12 +68,19 @@ typedef struct {
 	} while (0)
 
 #define DEFINE_SEQLOCK(x) \
-		seqlock_t x = __SEQLOCK_UNLOCKED(x)
+	seqlock_t x = __SEQLOCK_UNLOCKED(x)
 
 /* Lock out other writers and update the count.
  * Acts like a normal spin_lock/unlock.
  * Don't need preempt_disable() because that is in the spin_lock already.
  */
+static inline void write_raw_seqlock(raw_seqlock_t *sl)
+{
+	raw_spin_lock(&sl->lock);
+	++sl->sequence;
+	smp_wmb();
+}
+
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
@@ -64,6 +88,13 @@ static inline void write_seqlock(seqlock_t *sl)
 	smp_wmb();
 }
 
+static inline void write_raw_sequnlock(raw_seqlock_t *sl)
+{
+	smp_wmb();
+	sl->sequence++;
+	raw_spin_unlock(&sl->lock);
+}
+
 static inline void write_sequnlock(seqlock_t *sl)
 {
 	smp_wmb();
@@ -83,6 +114,21 @@ static inline int write_tryseqlock(seqlock_t *sl)
 }
 
 /* Start of read calculation -- fetch last complete writer token */
+static __always_inline unsigned read_raw_seqbegin(const raw_seqlock_t *sl)
+{
+	unsigned ret;
+
+repeat:
+	ret = sl->sequence;
+	smp_rmb();
+	if (unlikely(ret & 1)) {
+		cpu_relax();
+		goto repeat;
+	}
+
+	return ret;
+}
+
 static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
 {
 	unsigned ret;
@@ -103,6 +149,14 @@ repeat:
  *
  * If sequence value changed then writer changed data while in section.
  */
+static __always_inline int
+read_raw_seqretry(const raw_seqlock_t *sl, unsigned start)
+{
+	smp_rmb();
+
+	return (sl->sequence != start);
+}
+
 static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
 {
 	smp_rmb();
@@ -170,12 +224,36 @@ static inline void write_seqcount_end(seqcount_t *s)
 /*
  * Possible sw/hw IRQ protected versions of the interfaces.
  */
+#define write_raw_seqlock_irqsave(lock, flags)				\
+	do { local_irq_save(flags); write_raw_seqlock(lock); } while (0)
+#define write_raw_seqlock_irq(lock)					\
+	do { local_irq_disable();   write_raw_seqlock(lock); } while (0)
+#define write_raw_seqlock_bh(lock)					\
+	do { local_bh_disable();    write_raw_seqlock(lock); } while (0)
+
+#define write_raw_sequnlock_irqrestore(lock, flags)			\
+	do { write_raw_sequnlock(lock); local_irq_restore(flags); } while(0)
+#define write_raw_sequnlock_irq(lock)					\
+	do { write_raw_sequnlock(lock); local_irq_enable(); } while(0)
+#define write_raw_sequnlock_bh(lock)					\
+	do { write_raw_sequnlock(lock); local_bh_enable(); } while(0)
+
+#define read_raw_seqbegin_irqsave(lock, flags)				\
+	({ local_irq_save(flags);   read_raw_seqbegin(lock); })
+
+#define read_raw_seqretry_irqrestore(lock, iv, flags)			\
+	({								\
+		int ret = read_raw_seqretry(lock, iv);			\
+		local_irq_restore(flags);				\
+		ret;							\
+	})
+
 #define write_seqlock_irqsave(lock, flags)				\
 	do { local_irq_save(flags); write_seqlock(lock); } while (0)
 #define write_seqlock_irq(lock)						\
 	do { local_irq_disable();   write_seqlock(lock); } while (0)
 #define write_seqlock_bh(lock)						\
-        do { local_bh_disable();    write_seqlock(lock); } while (0)
+	do { local_bh_disable();    write_seqlock(lock); } while (0)
 
 #define write_sequnlock_irqrestore(lock, flags)				\
 	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-- 
1.7.0.4
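
For completeness, a sketch of the IRQ-protected variants added by the
last hunk (again hypothetical, reusing tick_raw_lock and last_tick_ns
from the note after the diffstat), for data that is also touched from
interrupt context:

static void update_tick_any_context(u64 now)
{
	unsigned long flags;

	/* Writer also excludes interrupt handlers on this CPU */
	write_raw_seqlock_irqsave(&tick_raw_lock, flags);
	last_tick_ns = now;
	write_raw_sequnlock_irqrestore(&tick_raw_lock, flags);
}

static u64 read_tick_irqsafe(void)
{
	unsigned long flags;
	unsigned seq;
	u64 ns;

	do {
		/* Reader runs with IRQs disabled for the whole retry loop */
		seq = read_raw_seqbegin_irqsave(&tick_raw_lock, flags);
		ns = last_tick_ns;
	} while (read_raw_seqretry_irqrestore(&tick_raw_lock, seq, flags));

	return ns;
}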