#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block, but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear, but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

typedef struct {
	unsigned sequence;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
	{ 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define seqlock_init(x) \
	do { \
		(x)->sequence = 0; \
		spin_lock_init(&(x)->lock); \
	} while (0)

#define DEFINE_SEQLOCK(x) \
	seqlock_t x = __SEQLOCK_UNLOCKED(x)

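/*
 * Example (illustrative sketch, not part of the original header): a seqlock
 * can be defined statically or initialized at run time. "foo_lock" and the
 * "foo" structure are hypothetical names.
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 * or, for a lock embedded in a dynamically allocated structure:
 *
 *	seqlock_init(&foo->lock);
 */
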
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}

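/*
 * Example writer (illustrative sketch; foo_lock, foo_sec and foo_nsec are
 * hypothetical):
 *
 *	write_seqlock(&foo_lock);
 *	foo_sec = new_sec;
 *	foo_nsec = new_nsec;
 *	write_sequnlock(&foo_lock);
 *
 * The count is odd while the update is in flight, so read_seqbegin()
 * waits it out and read_seqretry() detects any overlap.
 */
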
static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}

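/*
 * Example (illustrative): write_tryseqlock() suits contexts that must not
 * spin waiting for the lock; "foo_lock" and "foo_value" are hypothetical:
 *
 *	if (write_tryseqlock(&foo_lock)) {
 *		foo_value = v;
 *		write_sequnlock(&foo_lock);
 *	}
 */
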
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(sl->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();

	return ret;
}

/*
 * Test whether the reader may have seen invalid data.
 *
 * If the sequence value changed, a writer modified the data while the
 * reader was inside the critical section, and the read must be retried.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();

	return unlikely(sl->sequence != start);
}

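/*
 * Example reader (illustrative sketch; foo_lock, foo_sec and foo_nsec are
 * hypothetical):
 *
 *	unsigned seq;
 *	unsigned long sec, nsec;
 *
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		sec = foo_sec;
 *		nsec = foo_nsec;
 *	} while (read_seqretry(&foo_lock, seq));
 */
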
/*
 * Version using the sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, held from before write_seqcount_begin() until after
 * write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

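/*
 * Example (illustrative): a seqcount_t is typically embedded next to the
 * data it guards; "struct foo" and its fields are hypothetical:
 *
 *	struct foo {
 *		seqcount_t seq;
 *		unsigned long a, b;
 *	};
 *
 *	seqcount_init(&foo->seq);
 */
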
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = s->sequence;
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

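/*
 * Example (illustrative): a caller that supplies the required read barrier
 * itself, as __read_seqcount_begin() expects; the smp_rmb() pairs with the
 * writer's smp_wmb():
 *
 *	seq = __read_seqcount_begin(&s);
 *	smp_rmb();
 *	...load the protected variables...
 */
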
/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested with the read_seqcount_retry
 * function.
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();

	return __read_seqcount_retry(s, start);
}

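/*
 * Example seqcount reader (illustrative; "foo" is the hypothetical
 * structure sketched above):
 *
 *	unsigned seq;
 *	unsigned long a, b;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo->seq);
 *		a = foo->a;
 *		b = foo->b;
 *	} while (read_seqcount_retry(&foo->seq, seq));
 */
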
/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

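/*
 * Example seqcount writer (illustrative; "foo_mutex" is a hypothetical
 * lock serializing writers, as this API requires):
 *
 *	mutex_lock(&foo_mutex);
 *	write_seqcount_begin(&foo->seq);
 *	foo->a = new_a;
 *	foo->b = new_b;
 *	write_seqcount_end(&foo->seq);
 *	mutex_unlock(&foo_mutex);
 */
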
/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

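/*
 * Note (illustrative): bumping the count by two keeps it even, so readers
 * are never made to spin in read_seqcount_begin(); any reader that started
 * before the barrier simply fails its read_seqcount_retry() check:
 *
 *	...publish the replacement data...
 *	write_seqcount_barrier(&foo->seq);
 */
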
/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags) \
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
	do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
	do { local_bh_disable(); write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags) \
	do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock) \
	do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock) \
	do { write_sequnlock(lock); local_bh_enable(); } while (0)

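/*
 * Example (illustrative): the _irqsave variant for data that is also
 * updated from hard interrupt context; "foo_lock" and "foo_sec" are
 * hypothetical:
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	foo_sec = new_sec;
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */
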
#define read_seqbegin_irqsave(lock, flags) \
	({ local_irq_save(flags); read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags) \
	({ \
		int ret = read_seqretry(lock, iv); \
		local_irq_restore(flags); \
		ret; \
	})

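/*
 * Example (illustrative): reader loop with interrupts disabled across each
 * attempt; "foo_lock" and "foo_sec" are hypothetical:
 *
 *	unsigned long flags;
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin_irqsave(&foo_lock, flags);
 *		sec = foo_sec;
 *	} while (read_seqretry_irqrestore(&foo_lock, seq, flags));
 */
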
#endif /* __LINUX_SEQLOCK_H */