Root/
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame contact the architecture maintainers.
 */
16 | |
17 | #include <linux/linkage.h> |
18 | #include <linux/preempt.h> |
19 | #include <linux/spinlock.h> |
20 | #include <linux/interrupt.h> |
21 | #include <linux/debug_locks.h> |
22 | #include <linux/module.h> |
23 | |
#ifndef _spin_trylock
/* Out-of-line _spin_trylock(); built only if the headers did not provide one. */
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
#endif
31 | |
#ifndef _read_trylock
/* Out-of-line _read_trylock(); built only if the headers did not provide one. */
int __lockfunc _read_trylock(rwlock_t *lock)
{
	return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);
#endif
39 | |
#ifndef _write_trylock
/* Out-of-line _write_trylock(); built only if the headers did not provide one. */
int __lockfunc _write_trylock(rwlock_t *lock)
{
	return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
#endif
47 | |
48 | /* |
49 | * If lockdep is enabled then we use the non-preemption spin-ops |
50 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
51 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): |
52 | */ |
53 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
54 | |
#ifndef _read_lock
/* Out-of-line _read_lock(); thin wrapper around the generic __read_lock(). */
void __lockfunc _read_lock(rwlock_t *lock)
{
	__read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
#endif
62 | |
#ifndef _spin_lock_irqsave
/*
 * Out-of-line _spin_lock_irqsave(); returns the saved irq flags word
 * to be handed back to _spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);
#endif
70 | |
#ifndef _spin_lock_irq
/* Out-of-line _spin_lock_irq(); thin wrapper around __spin_lock_irq(). */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	__spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
#endif
78 | |
#ifndef _spin_lock_bh
/* Out-of-line _spin_lock_bh(); thin wrapper around __spin_lock_bh(). */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	__spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
#endif
86 | |
#ifndef _read_lock_irqsave
/*
 * Out-of-line _read_lock_irqsave(); returns the saved irq flags word
 * to be handed back to _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);
#endif
94 | |
#ifndef _read_lock_irq
/* Out-of-line _read_lock_irq(); thin wrapper around __read_lock_irq(). */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	__read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
#endif
102 | |
#ifndef _read_lock_bh
/* Out-of-line _read_lock_bh(); thin wrapper around __read_lock_bh(). */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	__read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
#endif
110 | |
#ifndef _write_lock_irqsave
/*
 * Out-of-line _write_lock_irqsave(); returns the saved irq flags word
 * to be handed back to _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);
#endif
118 | |
#ifndef _write_lock_irq
/* Out-of-line _write_lock_irq(); thin wrapper around __write_lock_irq(). */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	__write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
#endif
126 | |
#ifndef _write_lock_bh
/* Out-of-line _write_lock_bh(); thin wrapper around __write_lock_bh(). */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	__write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
#endif
134 | |
#ifndef _spin_lock
/* Out-of-line _spin_lock(); thin wrapper around the generic __spin_lock(). */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	__spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);
#endif
142 | |
#ifndef _write_lock
/* Out-of-line _write_lock(); thin wrapper around the generic __write_lock(). */
void __lockfunc _write_lock(rwlock_t *lock)
{
	__write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);
#endif
150 | |
151 | #else /* CONFIG_PREEMPT: */ |
152 | |
/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
160 | |
/*
 * Expands into the preemption-friendly _lock, _lock_irqsave, _lock_irq
 * and _lock_bh variants for one lock flavour (op = spin/read/write,
 * locktype = spinlock/rwlock).  The acquire loop only ever takes the
 * lock via trylock so the spinning CPU stays preemptable while waiting.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		/* Signal the holder to drop the lock, then spin	\
		 * (preemptably) until it looks takeable again. */	\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		/* Re-enable irqs while we wait so they are only	\
		 * disabled once the lock is actually held. */		\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)
225 | |
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
238 | |
239 | #endif /* CONFIG_PREEMPT */ |
240 | |
241 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
242 | |
/*
 * Acquire @lock while telling lockdep to file the acquisition under
 * @subclass, so that taking two locks of the same lock class in a
 * defined order does not trigger a false deadlock report.
 */
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
250 | |
/*
 * irqsave variant of _spin_lock_nested(): disables local irqs before
 * the lockdep annotation and acquisition, and returns the saved flags
 * word for the matching _spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
				_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
263 | |
/*
 * Acquire @lock while telling lockdep that @nest_lock (already held by
 * the caller) serializes all acquisitions of this lock class.
 */
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);
272 | |
273 | #endif |
274 | |
#ifndef _spin_unlock
/* Out-of-line _spin_unlock(); thin wrapper around __spin_unlock(). */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	__spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
#endif
282 | |
#ifndef _write_unlock
/* Out-of-line _write_unlock(); thin wrapper around __write_unlock(). */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	__write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);
#endif
290 | |
#ifndef _read_unlock
/* Out-of-line _read_unlock(); thin wrapper around __read_unlock(). */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	__read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);
#endif
298 | |
#ifndef _spin_unlock_irqrestore
/*
 * Out-of-line _spin_unlock_irqrestore(); @flags is the value returned
 * by the matching _spin_lock_irqsave().
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	__spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
#endif
306 | |
#ifndef _spin_unlock_irq
/* Out-of-line _spin_unlock_irq(); thin wrapper around __spin_unlock_irq(). */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	__spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);
#endif
314 | |
#ifndef _spin_unlock_bh
/* Out-of-line _spin_unlock_bh(); thin wrapper around __spin_unlock_bh(). */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	__spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);
#endif
322 | |
#ifndef _read_unlock_irqrestore
/*
 * Out-of-line _read_unlock_irqrestore(); @flags is the value returned
 * by the matching _read_lock_irqsave().
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
#endif
330 | |
#ifndef _read_unlock_irq
/* Out-of-line _read_unlock_irq(); thin wrapper around __read_unlock_irq(). */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	__read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);
#endif
338 | |
#ifndef _read_unlock_bh
/* Out-of-line _read_unlock_bh(); thin wrapper around __read_unlock_bh(). */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	__read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);
#endif
346 | |
#ifndef _write_unlock_irqrestore
/*
 * Out-of-line _write_unlock_irqrestore(); @flags is the value returned
 * by the matching _write_lock_irqsave().
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
#endif
354 | |
#ifndef _write_unlock_irq
/* Out-of-line _write_unlock_irq(); thin wrapper around __write_unlock_irq(). */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	__write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);
#endif
362 | |
#ifndef _write_unlock_bh
/* Out-of-line _write_unlock_bh(); thin wrapper around __write_unlock_bh(). */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	__write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);
#endif
370 | |
#ifndef _spin_trylock_bh
/* Out-of-line _spin_trylock_bh(); thin wrapper around __spin_trylock_bh(). */
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);
#endif
378 | |
379 | notrace int in_lock_functions(unsigned long addr) |
380 | { |
381 | /* Linker adds these: start and end of __lockfunc functions */ |
382 | extern char __lock_text_start[], __lock_text_end[]; |
383 | |
384 | return addr >= (unsigned long)__lock_text_start |
385 | && addr < (unsigned long)__lock_text_end; |
386 | } |
387 | EXPORT_SYMBOL(in_lock_functions); |
388 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9