/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
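
/*
 * For reference, the lock state is encoded in lock->count (see the
 * fastpath/slowpath transitions throughout this file):
 *
 *	 1: unlocked
 *	 0: locked, no waiters
 *	<0: locked, with possible waiters queued on lock->wait_list
 */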

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
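
/*
 * Example usage (an illustrative sketch; 'my_mutex', 'struct foo' and
 * foo_setup() are hypothetical names): mutexes are set up either
 * statically with DEFINE_MUTEX() or at runtime with the mutex_init()
 * wrapper - never by memset()-ing or copying the structure:
 *
 *	static DEFINE_MUTEX(my_mutex);
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);
 *	}
 */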

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed while the
 * mutex is still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here:
	 * the slow path is always taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
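
/*
 * Example usage (an illustrative sketch; 'stats_lock' and 'stats' are
 * hypothetical names): a plain critical section. The same task that
 * takes the lock must release it:
 *
 *	static DEFINE_MUTEX(stats_lock);
 *	static unsigned long stats;
 *
 *	void stats_inc(void)
 *	{
 *		mutex_lock(&stats_lock);
 *		stats++;
 *		mutex_unlock(&stats_lock);
 *	}
 */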

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have been preempted
		 * between the owner acquiring the lock and setting the
		 * owner field. If we're an RT task, spinning here would
		 * live-lock, because we won't let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
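
/*
 * Example (an illustrative sketch; 'parent' and 'child' are hypothetical
 * objects whose locks belong to the same lock class): the _nested()
 * variants tell lockdep that taking two locks of one class in a fixed
 * order is intentional rather than a deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */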

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have
	 * to unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), sleeping until the mutex becomes
 * available if necessary. Returns 0 if the mutex has been acquired,
 * and -EINTR if a signal arrives while waiting for the lock.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
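
/*
 * Example (an illustrative sketch; 'dev' and its members are
 * hypothetical): callers must check the return value, typically
 * translating -EINTR into -ERESTARTSYS on the syscall path:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	dev->open_count++;
 *	mutex_unlock(&dev->lock);
 */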

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
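
/*
 * Example (an illustrative sketch; 'cache_lock' and shrink_cache() are
 * hypothetical): per the spin_trylock() convention, a nonzero return
 * means the lock was taken. On contention we simply skip the work
 * instead of sleeping:
 *
 *	if (mutex_trylock(&cache_lock)) {
 *		shrink_cache();
 *		mutex_unlock(&cache_lock);
 *	}
 */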

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns true (holding @lock) if the decrement reached 0, and false
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
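
/*
 * Example (an illustrative sketch; 'obj', its 'refcnt' member,
 * 'obj_list_lock' and obj_teardown() are hypothetical): the classic
 * "drop the last reference under a lock" pattern, the mutex analogue
 * of atomic_dec_and_lock():
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		obj_teardown(obj);
 *	}
 */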