/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *   from the -rt tree, where it was originally implemented for rtmutexes
 *   by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *   and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
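
/*
 * A note on the lock->count convention that the fastpath/slowpath pairs
 * below maintain (derived from the transitions in this file):
 *
 *        1 - unlocked
 *        0 - locked, no waiters
 * negative - locked, and there may be waiters; the unlocker must take
 *            the slowpath and wake up a waiter
 */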

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1);
    spin_lock_init(&lock->wait_lock);
    INIT_LIST_HEAD(&lock->wait_list);
    mutex_clear_owner(lock);

    debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
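
/*
 * Usage sketch (illustrative only, not part of this file): callers
 * normally reach __mutex_init() through the mutex_init() wrapper, or
 * define the mutex statically with DEFINE_MUTEX(). The structure and
 * function names below are hypothetical.
 *
 *    static DEFINE_MUTEX(registry_lock);
 *
 *    struct my_dev {
 *        struct mutex io_lock;
 *    };
 *
 *    static void my_dev_setup(struct my_dev *d)
 *    {
 *        mutex_init(&d->io_lock);
 *    }
 */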

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed while the
 * mutex is still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
    mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
#ifndef CONFIG_DEBUG_MUTEXES
    /*
     * When debugging is enabled we must not clear the owner here:
     * the slow path is always taken in that case, and it clears the
     * owner field itself after verifying that the owner was indeed
     * current.
     */
    mutex_clear_owner(lock);
#endif
    __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
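
/*
 * Usage sketch (illustrative only): the lock/unlock pair brackets a
 * critical section that is allowed to sleep. The structure and field
 * names are hypothetical.
 *
 *    mutex_lock(&d->io_lock);
 *    ... read or modify state protected by io_lock, may sleep ...
 *    mutex_unlock(&d->io_lock);
 */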

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
            struct lockdep_map *nest_lock, unsigned long ip)
{
    struct task_struct *task = current;
    struct mutex_waiter waiter;
    unsigned long flags;

    preempt_disable();
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    /*
     * Optimistic spinning.
     *
     * We try to spin for acquisition when we find that there are no
     * pending waiters and the lock owner is currently running on a
     * (different) CPU.
     *
     * The rationale is that if the lock owner is running, it is likely to
     * release the lock soon.
     *
     * Since this needs the lock owner, and this mutex implementation
     * doesn't track the owner atomically in the lock field, we need to
     * track it non-atomically.
     *
     * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
     * to serialize everything.
     */

    for (;;) {
        struct task_struct *owner;

        /*
         * If there's an owner, wait for it to either
         * release the lock or go to sleep.
         */
        owner = ACCESS_ONCE(lock->owner);
        if (owner && !mutex_spin_on_owner(lock, owner))
            break;

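        /* Try to take the lock when nobody holds it (1 -> 0): */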
        if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
            lock_acquired(&lock->dep_map, ip);
            mutex_set_owner(lock);
            preempt_enable();
            return 0;
        }

        /*
         * When there's no owner, we might have preempted between the
         * owner acquiring the lock and setting the owner field. If
         * we're an RT task, spinning would live-lock, because we
         * would never let the owner complete.
         */
        if (!owner && (need_resched() || rt_task(task)))
            break;

        /*
         * The cpu_relax() call is a compiler barrier which forces
         * everything in this loop to be re-loaded. We don't need
         * memory barriers as we'll eventually observe the right
         * values at the cost of a few extra spins.
         */
        arch_mutex_cpu_relax();
    }
#endif
    spin_lock_mutex(&lock->wait_lock, flags);

    debug_mutex_lock_common(lock, &waiter);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list);
    waiter.task = task;

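    /* Mark the lock contended; if it was unlocked (1), we now own it: */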
    if (atomic_xchg(&lock->count, -1) == 1)
        goto done;

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
         * Let's try to take the lock again - this is needed even if
         * we get here for the first time (shortly after failing to
         * acquire the lock), to make sure that we get a wakeup once
         * it's unlocked. Later on, if we sleep, this is the
         * operation that gives us the lock. We xchg it to -1, so
         * that when we release the lock, we properly wake up the
         * other waiters:
         */
        if (atomic_xchg(&lock->count, -1) == 1)
            break;

        /*
         * got a signal? (This code gets eliminated in the
         * TASK_UNINTERRUPTIBLE case.)
         */
        if (unlikely(signal_pending_state(state, task))) {
            mutex_remove_waiter(lock, &waiter,
                        task_thread_info(task));
            mutex_release(&lock->dep_map, 1, ip);
            spin_unlock_mutex(&lock->wait_lock, flags);

            debug_mutex_free_waiter(&waiter);
            preempt_enable();
            return -EINTR;
        }
        __set_task_state(task, state);

        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags);
        schedule_preempt_disabled();
        spin_lock_mutex(&lock->wait_lock, flags);
    }

done:
    lock_acquired(&lock->dep_map, ip);
    /* got the lock - rejoice! */
    mutex_remove_waiter(lock, &waiter, current_thread_info());
    mutex_set_owner(lock);

    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    spin_unlock_mutex(&lock->wait_lock, flags);

    debug_mutex_free_waiter(&waiter);
    preempt_enable();

    return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
    might_sleep();
    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
    might_sleep();
    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
    might_sleep();
    return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
    might_sleep();
    return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
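
/*
 * Usage sketch for the lockdep nesting annotations (illustrative only):
 * when two mutexes of the same lock class must be held at once - say,
 * two instances of the same hypothetical structure - the second
 * acquisition is annotated with a subclass so lockdep does not report
 * it as recursive locking:
 *
 *    mutex_lock(&a->lock);
 *    mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *    ...
 *    mutex_unlock(&b->lock);
 *    mutex_unlock(&a->lock);
 */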

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);
    unsigned long flags;

    spin_lock_mutex(&lock->wait_lock, flags);
    mutex_release(&lock->dep_map, nested, _RET_IP_);
    debug_mutex_unlock(lock);

    /*
     * Some architectures leave the lock unlocked in the fastpath failure
     * case; others need to leave it locked. In the latter case we have to
     * unlock it here.
     */
    if (__mutex_slowpath_needs_to_unlock())
        atomic_set(&lock->count, 1);

    if (!list_empty(&lock->wait_list)) {
        /* get the first entry from the wait-list: */
        struct mutex_waiter *waiter =
                list_entry(lock->wait_list.next,
                       struct mutex_waiter, list);

        debug_mutex_wake_waiter(lock, waiter);

        wake_up_process(waiter->task);
    }

    spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
    __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(): sleep until the mutex becomes
 * available and return 0 once it has been acquired. If a signal
 * arrives while waiting for the lock, this function returns -EINTR
 * instead.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
    int ret;

    might_sleep();
    ret = __mutex_fastpath_lock_retval
            (&lock->count, __mutex_lock_interruptible_slowpath);
    if (!ret)
        mutex_set_owner(lock);

    return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
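
/*
 * Usage sketch (illustrative only): the return value must be checked,
 * and the caller typically propagates an error when interrupted. The
 * -ERESTARTSYS shown here is the common driver idiom; the function
 * itself returns 0 or -EINTR.
 *
 *    if (mutex_lock_interruptible(&d->io_lock))
 *        return -ERESTARTSYS;
 *    ...
 *    mutex_unlock(&d->io_lock);
 */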
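
/**
 * mutex_lock_killable - acquire the mutex, interruptible by fatal signals
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), except that only a fatal signal can
 * interrupt the sleep: in that case -EINTR is returned, otherwise 0
 * once the mutex has been acquired.
 */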
int __sched mutex_lock_killable(struct mutex *lock)
{
    int ret;

    might_sleep();
    ret = __mutex_fastpath_lock_retval
            (&lock->count, __mutex_lock_killable_slowpath);
    if (!ret)
        mutex_set_owner(lock);

    return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);
    unsigned long flags;
    int prev;

    spin_lock_mutex(&lock->wait_lock, flags);

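    /* Mark the lock contended; prev tells us whether it was unlocked: */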
    prev = atomic_xchg(&lock->count, -1);
    if (likely(prev == 1)) {
        mutex_set_owner(lock);
        mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
    }

    /* Set it back to 0 if there are no waiters: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    spin_unlock_mutex(&lock->wait_lock, flags);

    return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
    int ret;

    ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
    if (ret)
        mutex_set_owner(lock);

    return ret;
}
EXPORT_SYMBOL(mutex_trylock);
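
/*
 * Usage sketch (illustrative only): note the spin_trylock()-style
 * convention - a nonzero return means the mutex WAS acquired:
 *
 *    if (mutex_trylock(&d->io_lock)) {
 *        ... fast path, lock held ...
 *        mutex_unlock(&d->io_lock);
 *    } else {
 *        ... lock contended, fall back or retry ...
 *    }
 */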

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns true, with the mutex held, if the decrement reached 0;
 * returns false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
    /* dec if we can't possibly hit 0 */
    if (atomic_add_unless(cnt, -1, 1))
        return 0;
    /* we might hit 0, so take the lock */
    mutex_lock(lock);
    if (!atomic_dec_and_test(cnt)) {
        /* when we actually did the dec, we didn't hit 0 */
        mutex_unlock(lock);
        return 0;
    }
    /* we hit 0, and we hold the lock */
    return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
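
/*
 * Usage sketch (illustrative only, hypothetical refcounted object):
 * the caller ends up holding the mutex exactly when the counter
 * dropped to 0, which is the teardown path:
 *
 *    if (atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock)) {
 *        list_del(&obj->node);
 *        mutex_unlock(&registry_lock);
 *        kfree(obj);
 *    }
 */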