Root/
1 | #ifndef __LINUX_RWLOCK_API_SMP_H |
2 | #define __LINUX_RWLOCK_API_SMP_H |
3 | |
4 | #ifndef __LINUX_SPINLOCK_API_SMP_H |
5 | # error "please don't include this file directly" |
6 | #endif |
7 | |
8 | /* |
9 | * include/linux/rwlock_api_smp.h |
10 | * |
11 | * spinlock API declarations on SMP (and debug) |
12 | * (implemented in kernel/spinlock.c) |
13 | * |
14 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar |
15 | * Released under the General Public License (GPL). |
16 | */ |
17 | |
/*
 * Prototypes for the out-of-line rwlock entry points (implemented in
 * kernel/spinlock.c, per the header comment above).  The sparse
 * __acquires()/__releases() annotations document which functions take
 * and which drop @lock; the *_irqsave variants return the saved
 * interrupt state to be passed back to *_irqrestore.
 */
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
int __lockfunc _raw_read_trylock(rwlock_t *lock);
int __lockfunc _raw_write_trylock(rwlock_t *lock);
void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
42 | |
/*
 * Per-operation inlining knobs: when an architecture selects a
 * CONFIG_INLINE_* option, the corresponding _raw_* entry point is
 * remapped to its __raw_* static-inline implementation below instead
 * of the out-of-line version declared above.
 */
#ifdef CONFIG_INLINE_READ_LOCK
#define _raw_read_lock(lock) __raw_read_lock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK
#define _raw_write_lock(lock) __raw_write_lock(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_BH
#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_BH
#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQ
#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_READ_TRYLOCK
#define _raw_read_trylock(lock) __raw_read_trylock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_TRYLOCK
#define _raw_write_trylock(lock) __raw_write_trylock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK
#define _raw_read_unlock(lock) __raw_read_unlock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK
#define _raw_write_unlock(lock) __raw_write_unlock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_BH
#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
#define _raw_read_unlock_irqrestore(lock, flags) \
	__raw_read_unlock_irqrestore(lock, flags)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
#define _raw_write_unlock_irqrestore(lock, flags) \
	__raw_write_unlock_irqrestore(lock, flags)
#endif
116 | |
117 | static inline int __raw_read_trylock(rwlock_t *lock) |
118 | { |
119 | preempt_disable(); |
120 | if (do_raw_read_trylock(lock)) { |
121 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); |
122 | return 1; |
123 | } |
124 | preempt_enable(); |
125 | return 0; |
126 | } |
127 | |
128 | static inline int __raw_write_trylock(rwlock_t *lock) |
129 | { |
130 | preempt_disable(); |
131 | if (do_raw_write_trylock(lock)) { |
132 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
133 | return 1; |
134 | } |
135 | preempt_enable(); |
136 | return 0; |
137 | } |
138 | |
139 | /* |
140 | * If lockdep is enabled then we use the non-preemption spin-ops |
141 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
142 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): |
143 | */ |
144 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
145 | |
/*
 * Take @lock for reading.  Preemption is disabled before the lockdep
 * annotation and the actual acquire; LOCK_CONTENDED() uses the
 * trylock/lock pair so lockdep can account contention.
 */
static inline void __raw_read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
152 | |
/*
 * Read-lock @lock with local interrupts disabled, returning the saved
 * interrupt state for the matching __raw_read_unlock_irqrestore().
 * Order matters: irqs off, then preemption off, then the lockdep
 * annotation, then the acquire.  LOCK_CONTENDED_FLAGS() passes &flags
 * so the arch slow path (do_raw_read_lock_flags) can use them.
 */
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
			     do_raw_read_lock_flags, &flags);
	return flags;
}
164 | |
/*
 * Read-lock @lock with local interrupts disabled (no state saved --
 * caller must pair with __raw_read_unlock_irq()).
 */
static inline void __raw_read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
172 | |
/*
 * Read-lock @lock with bottom halves disabled, so the critical section
 * cannot be interrupted by softirq processing on this CPU.
 */
static inline void __raw_read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
180 | |
/*
 * Write-lock @lock with local interrupts disabled, returning the saved
 * interrupt state for __raw_write_unlock_irqrestore().  Same ordering
 * as the read variant: irqs off, preempt off, lockdep, acquire.
 */
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
			     do_raw_write_lock_flags, &flags);
	return flags;
}
192 | |
/*
 * Write-lock @lock with local interrupts disabled (no state saved --
 * caller must pair with __raw_write_unlock_irq()).
 */
static inline void __raw_write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
200 | |
/*
 * Write-lock @lock with bottom halves disabled on this CPU.
 */
static inline void __raw_write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
208 | |
/*
 * Take @lock for writing with only preemption disabled (irqs and BHs
 * untouched).
 */
static inline void __raw_write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
215 | |
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
217 | |
/*
 * Release a write hold on @lock: lockdep release first, then the real
 * unlock, then re-enable preemption (mirror image of the lock path).
 */
static inline void __raw_write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	preempt_enable();
}
224 | |
/*
 * Release a read hold on @lock and re-enable preemption.
 */
static inline void __raw_read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	preempt_enable();
}
231 | |
/*
 * Release a read hold on @lock and restore the interrupt state saved
 * by __raw_read_lock_irqsave().  Interrupts are restored before
 * preemption is re-enabled, unwinding the lock path in reverse order.
 */
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
240 | |
/*
 * Release a read hold on @lock and unconditionally re-enable local
 * interrupts (pairs with __raw_read_lock_irq()).
 */
static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
248 | |
/*
 * Release a read hold on @lock and re-enable bottom halves.
 * preempt_enable_no_resched() is used because the BH-enable path that
 * follows handles the reschedule check; local_bh_enable_ip() records
 * the caller's address for instrumentation.
 */
static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
256 | |
/*
 * Release a write hold on @lock and restore the interrupt state saved
 * by __raw_write_lock_irqsave(), unwinding the lock path in reverse.
 */
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
265 | |
/*
 * Release a write hold on @lock and unconditionally re-enable local
 * interrupts (pairs with __raw_write_lock_irq()).
 */
static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
273 | |
/*
 * Release a write hold on @lock and re-enable bottom halves.  Same
 * no-resched + local_bh_enable_ip() pattern as __raw_read_unlock_bh().
 */
static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
281 | |
282 | #endif /* __LINUX_RWLOCK_API_SMP_H */ |
283 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9