#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP builds the spin locks disappear, but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
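
/*
 * Expected writer usage, pairing with the reader loop above (a minimal
 * sketch; "foo" is the same illustrative seqlock_t as in the reader
 * example):
 *
 *	write_seqlock(&foo);
 *	... update the protected data ...
 *	write_sequnlock(&foo);
 */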

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
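
/*
 * Illustrative layout only: a seqcount_t typically sits next to the data
 * it protects, with writer serialization provided elsewhere. The struct
 * and names below are hypothetical.
 *
 *	struct my_sample {
 *		seqcount_t seq;
 *		u64 a, b;
 *	};
 *
 *	seqcount_init(&sample->seq);	(once, at setup time)
 */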

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
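
/*
 * Example pairing for the barrier-less variant (a sketch; "s", "data" and
 * "snap" are illustrative). The caller supplies the read barrier itself:
 *
 *	do {
 *		start = __read_seqcount_begin(&s);
 *		smp_rmb();	(orders the loads below after the count)
 *		snap = data;
 *	} while (read_seqcount_retry(&s, start));
 */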

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * The validity of the critical section is later tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * The validity of the critical section is later tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function does not wait for the count
 * to stabilize. If a writer is active when the section begins, the
 * subsequent read_seqcount_retry() is guaranteed to fail instead.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
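
/*
 * Because the low bit is cleared, a reader that starts while a write is in
 * progress simply retries. A sketch of an optimistic path that prefers
 * falling back over spinning (names hypothetical):
 *
 *	start = raw_seqcount_begin(&s);
 *	result = compute_from(data);	(may observe a torn update)
 *	if (read_seqcount_retry(&s, start))
 *		goto slow_path;
 */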

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after loading the variables protected by this critical section
 * and before the retry check.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
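
/*
 * Typical read side over a bare seqcount_t (a sketch; my_seq and my_data
 * are hypothetical):
 *
 *	do {
 *		start = read_seqcount_begin(&my_seq);
 *		copy = my_data;
 *	} while (read_seqcount_retry(&my_seq, start));
 */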

/*
 * The sequence-counter-only version assumes that callers provide their
 * own mutual exclusion among writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
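
/*
 * Typical write side, with mutual exclusion supplied by the caller (a
 * sketch; my_lock, my_seq and my_data are hypothetical):
 *
 *	spin_lock(&my_lock);
 *	write_seqcount_begin(&my_seq);
 *	my_data = new_value;
 *	write_seqcount_end(&my_seq);
 *	spin_unlock(&my_lock);
 */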

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
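
/*
 * One hypothetical use: after tearing down an object outside of a full
 * write section, bump the count so that any reader still inside its
 * critical section is forced to retry (names illustrative; the caller
 * must still serialize against other writers):
 *
 *	detach_object(obj);
 *	write_seqcount_barrier(&obj->seq);
 */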

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO,		\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
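
/*
 * Initialization, either at compile time or at run time (illustrative):
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 *	seqlock_init(&bar->lock);	(for dynamically allocated objects)
 */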

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable(); spin_lock() already disables
 * preemption.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
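
/*
 * The _irqsave variant suits contexts where interrupts may or may not
 * already be disabled (a sketch; "foo" is illustrative):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo, flags);
 *	... update the protected data ...
 *	write_sequnlock_irqrestore(&foo, flags);
 */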

#endif /* __LINUX_SEQLOCK_H */