/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
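
/*
 * A minimal usage sketch (not part of this file): callers normally go
 * through the mutex_init() macro, which supplies the name and lockdep
 * class key for them, or define the mutex statically. The struct and
 * function names below are hypothetical.
 *
 *	static DEFINE_MUTEX(global_lock);	// statically defined
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		// dynamic initialization
 *	}
 */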

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
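
/*
 * A minimal lock/unlock usage sketch (not part of this file), showing the
 * required discipline: the same task that locks must unlock, and the
 * critical section must not exit with the mutex still held. The struct
 * and function names are hypothetical.
 *
 *	static int my_dev_update(struct my_dev *dev, int val)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->state = val;		// protected by dev->lock
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */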

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, we can live-lock here because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
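
/*
 * A usage sketch for the _nested variant (not part of this file): when two
 * mutexes of the same lockdep class must legitimately be held at once, the
 * inner lock is annotated with a subclass so lockdep does not report a
 * false deadlock. The struct and function below are hypothetical.
 *
 *	static void my_dev_transfer(struct my_dev *src, struct my_dev *dst)
 *	{
 *		mutex_lock(&src->lock);
 *		mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		// ... move state from src to dst ...
 *		mutex_unlock(&dst->lock);
 *		mutex_unlock(&src->lock);
 *	}
 *
 * Note: callers must still impose a consistent lock ordering (e.g. by
 * address) to avoid real deadlocks; the subclass only informs lockdep.
 */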

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
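
/*
 * A usage sketch for the interruptible variant (not part of this file):
 * the caller must check the return value and back out, typically
 * propagating -ERESTARTSYS or -EINTR to userspace. The killable variant
 * below is handled the same way, but only fatal signals interrupt the
 * wait. The struct and function names are hypothetical.
 *
 *	static int my_dev_wait_op(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;	// signal arrived while waiting
 *		// ... critical section ...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */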

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
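
/*
 * A usage sketch for the trylock convention (not part of this file):
 * unlike down_trylock(), a non-zero return value means the lock WAS
 * acquired. The struct and function names are hypothetical.
 *
 *	static int my_dev_poll(struct my_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return -EBUSY;		// contended, don't sleep
 *		// ... critical section ...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */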

/**
 * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to acquire and hold if the count drops to 0
 *
 * Return true (and hold the lock) if the decrement hit 0,
 * return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
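
/*
 * A usage sketch (not part of this file): the classic pattern is dropping
 * a reference count where the final put must tear down state under a lock,
 * without taking the lock on every put. The struct, list lock and function
 * names are hypothetical.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock))
 *			return;			// not the last reference
 *		list_del(&obj->node);		// last ref: unlink under lock
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */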