kernel/futex.c

/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
    /*
     * list of 'owned' pi_state instances - these have to be
     * cleaned up in do_exit() if the task exits prematurely:
     */
    struct list_head list;

    /*
     * The PI object:
     */
    struct rt_mutex pi_mutex;

    struct task_struct *owner;
    atomic_t refcount;

    union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list: priority-sorted list of tasks waiting on this futex
 * @task: the task waiting on the futex
 * @lock_ptr: the hash bucket lock
 * @key: the key the futex is hashed on
 * @pi_state: optional priority inheritance state
 * @rt_waiter: rt_waiter storage for use with requeue_pi
 * @requeue_pi_key: the requeue_pi target futex key
 * @bitset: bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
    struct plist_node list;

    struct task_struct *task;
    spinlock_t *lock_ptr;
    union futex_key key;
    struct futex_pi_state *pi_state;
    struct rt_mutex_waiter *rt_waiter;
    union futex_key *requeue_pi_key;
    u32 bitset;
};

static const struct futex_q futex_q_init = {
    /* list gets initialized in queue_me() */
    .key = FUTEX_KEY_INIT,
    .bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
    spinlock_t lock;
    struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
    u32 hash = jhash2((u32*)&key->both.word,
              (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
              key->both.offset);
    return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
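
/*
 * Illustrative sketch (not part of the kernel code): with the default
 * FUTEX_HASHBITS of 8 there are 256 buckets, and the bucket index is
 * simply the low bits of the jhash2() result:
 *
 *	hb = &futex_queues[jhash2(...) & 255];
 *
 * Unrelated futexes can therefore land in the same bucket; waiters are
 * told apart by match_futex() on the full key, never by the hash alone.
 */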

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
    return (key1 && key2
        && key1->both.word == key2->both.word
        && key1->both.ptr == key2->both.ptr
        && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
    if (!key->both.ptr)
        return;

    switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
    case FUT_OFF_INODE:
        ihold(key->shared.inode);
        break;
    case FUT_OFF_MMSHARED:
        atomic_inc(&key->private.mm->mm_count);
        break;
    }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
    if (!key->both.ptr) {
        /* If we're here then we tried to put a key we failed to get */
        WARN_ON_ONCE(1);
        return;
    }

    switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
    case FUT_OFF_INODE:
        iput(key->shared.inode);
        break;
    case FUT_OFF_MMSHARED:
        mmdrop(key->private.mm);
        break;
    }
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, so the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
    unsigned long address = (unsigned long)uaddr;
    struct mm_struct *mm = current->mm;
    struct page *page, *page_head;
    int err;

    /*
     * The futex address must be "naturally" aligned.
     */
    key->both.offset = address % PAGE_SIZE;
    if (unlikely((address % sizeof(u32)) != 0))
        return -EINVAL;
    address -= key->both.offset;

    /*
     * PROCESS_PRIVATE futexes are fast.
     * As the mm cannot disappear under us and the 'key' only needs the
     * virtual address, we don't even have to find the underlying vma.
     * Note: we do have to check that 'uaddr' is a valid user address,
     * but access_ok() should be faster than find_vma().
     */
    if (!fshared) {
        if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
            return -EFAULT;
        key->private.mm = mm;
        key->private.address = address;
        get_futex_key_refs(key);
        return 0;
    }

again:
    err = get_user_pages_fast(address, 1, 1, &page);
    if (err < 0)
        return err;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    page_head = page;
    if (unlikely(PageTail(page))) {
        put_page(page);
        /* serialize against __split_huge_page_splitting() */
        local_irq_disable();
        if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
            page_head = compound_head(page);
            /*
             * page_head is a valid pointer, but we must pin
             * it before taking the PG_lock and/or
             * PG_compound_lock. The moment we re-enable
             * irqs __split_huge_page_splitting() can
             * return and the head page can be freed from
             * under us. We can't take the PG_lock and/or
             * PG_compound_lock on a page that could be
             * freed from under us.
             */
            if (page != page_head) {
                get_page(page_head);
                put_page(page);
            }
            local_irq_enable();
        } else {
            local_irq_enable();
            goto again;
        }
    }
#else
    page_head = compound_head(page);
    if (page != page_head) {
        get_page(page_head);
        put_page(page);
    }
#endif

    lock_page(page_head);
    if (!page_head->mapping) {
        unlock_page(page_head);
        put_page(page_head);
        goto again;
    }

    /*
     * Private mappings are handled in a simple way.
     *
     * NOTE: When userspace waits on a MAP_SHARED mapping, even if
     * it's a read-only handle, it's expected that futexes attach to
     * the object not the particular process.
     */
    if (PageAnon(page_head)) {
        key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
        key->private.mm = mm;
        key->private.address = address;
    } else {
        key->both.offset |= FUT_OFF_INODE; /* inode-based key */
        key->shared.inode = page_head->mapping->host;
        key->shared.pgoff = page_head->index;
    }

    get_futex_key_refs(key);

    unlock_page(page_head);
    put_page(page_head);
    return 0;
}
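
/*
 * A hedged worked example of the three key flavours built above, for a
 * hypothetical u32 futex word sitting at offset 0x4 within its page:
 *
 *	!fshared:	     { mm, page-aligned address, offset = 0x4 }
 *	fshared, anon page:  { mm, page-aligned address, offset = 0x4|FUT_OFF_MMSHARED }
 *	fshared, file page:  { inode, pgoff, offset = 0x4|FUT_OFF_INODE }
 *
 * The FUT_OFF_* flags live in the low bits of both.offset (a u32 futex
 * is 4-byte aligned, so bits 0-1 of the page offset are always zero),
 * which is how get/drop_futex_key_refs() know which union member is live.
 */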

static inline void put_futex_key(union futex_key *key)
{
    drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr: pointer to faulting user space address
 *
 * Slow path to fix up the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault-disabled
 * section, so we might as well avoid the #PF overhead by calling
 * get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
    struct mm_struct *mm = current->mm;
    int ret;

    down_read(&mm->mmap_sem);
    ret = get_user_pages(current, mm, (unsigned long)uaddr,
                 1, 1, 0, NULL, NULL);
    up_read(&mm->mmap_sem);

    return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb: the hash bucket the futex_q's reside in
 * @key: the futex key (to distinguish it from other futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
                    union futex_key *key)
{
    struct futex_q *this;

    plist_for_each_entry(this, &hb->chain, list) {
        if (match_futex(&this->key, key))
            return this;
    }
    return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
                      u32 uval, u32 newval)
{
    int ret;

    pagefault_disable();
    ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
    pagefault_enable();

    return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
    int ret;

    pagefault_disable();
    ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
    pagefault_enable();

    return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
    struct futex_pi_state *pi_state;

    if (likely(current->pi_state_cache))
        return 0;

    pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

    if (!pi_state)
        return -ENOMEM;

    INIT_LIST_HEAD(&pi_state->list);
    /* pi_mutex gets initialized later */
    pi_state->owner = NULL;
    atomic_set(&pi_state->refcount, 1);
    pi_state->key = FUTEX_KEY_INIT;

    current->pi_state_cache = pi_state;

    return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
    struct futex_pi_state *pi_state = current->pi_state_cache;

    WARN_ON(!pi_state);
    current->pi_state_cache = NULL;

    return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
    if (!atomic_dec_and_test(&pi_state->refcount))
        return;

    /*
     * If pi_state->owner is NULL, the owner is most probably dying
     * and has cleaned up the pi_state already
     */
    if (pi_state->owner) {
        raw_spin_lock_irq(&pi_state->owner->pi_lock);
        list_del_init(&pi_state->list);
        raw_spin_unlock_irq(&pi_state->owner->pi_lock);

        rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
    }

    if (current->pi_state_cache)
        kfree(pi_state);
    else {
        /*
         * pi_state->list is already empty.
         * clear pi_state->owner.
         * refcount is at 0 - put it back to 1.
         */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        current->pi_state_cache = pi_state;
    }
}
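
/*
 * The point of the one-element per-task cache above: allocation happens
 * before any hash bucket lock is taken, so the locked paths can
 * "allocate" without sleeping. A sketch of the calling pattern (see
 * futex_lock_pi() and futex_requeue() below for real callers):
 *
 *	refill_pi_state_cache();	may sleep, GFP_KERNEL
 *	spin_lock(&hb->lock);
 *	...
 *	pi_state = alloc_pi_state();	never sleeps, cache is primed
 *
 * free_pi_state() refills the cache instead of calling kfree() when the
 * slot is empty, which keeps the accounting balanced across retries.
 */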

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
    struct task_struct *p;

    rcu_read_lock();
    p = find_task_by_vpid(pid);
    if (p)
        get_task_struct(p);

    rcu_read_unlock();

    return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
    struct list_head *next, *head = &curr->pi_state_list;
    struct futex_pi_state *pi_state;
    struct futex_hash_bucket *hb;
    union futex_key key = FUTEX_KEY_INIT;

    if (!futex_cmpxchg_enabled)
        return;
    /*
     * We are a ZOMBIE and nobody can enqueue itself on
     * pi_state_list anymore, but we have to be careful
     * versus waiters unqueueing themselves:
     */
    raw_spin_lock_irq(&curr->pi_lock);
    while (!list_empty(head)) {

        next = head->next;
        pi_state = list_entry(next, struct futex_pi_state, list);
        key = pi_state->key;
        hb = hash_futex(&key);
        raw_spin_unlock_irq(&curr->pi_lock);

        spin_lock(&hb->lock);

        raw_spin_lock_irq(&curr->pi_lock);
        /*
         * We dropped the pi-lock, so re-check whether this
         * task still owns the PI-state:
         */
        if (head->next != next) {
            spin_unlock(&hb->lock);
            continue;
        }

        WARN_ON(pi_state->owner != curr);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        pi_state->owner = NULL;
        raw_spin_unlock_irq(&curr->pi_lock);

        rt_mutex_unlock(&pi_state->pi_mutex);

        spin_unlock(&hb->lock);

        raw_spin_lock_irq(&curr->pi_lock);
    }
    raw_spin_unlock_irq(&curr->pi_lock);
}
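
/*
 * The lock juggling above keeps to the fixed ordering hb->lock before
 * pi_lock. A sketch of one loop iteration:
 *
 *	raw_spin_unlock_irq(&curr->pi_lock);	drop: wrong order otherwise
 *	spin_lock(&hb->lock);			outer lock first
 *	raw_spin_lock_irq(&curr->pi_lock);	retake, then re-validate
 *	if (head->next != next) ...		list may have changed meanwhile
 *
 * The re-check is what makes dropping and retaking pi_lock safe against
 * waiters unqueueing themselves concurrently.
 */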

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        union futex_key *key, struct futex_pi_state **ps)
{
    struct futex_pi_state *pi_state = NULL;
    struct futex_q *this, *next;
    struct plist_head *head;
    struct task_struct *p;
    pid_t pid = uval & FUTEX_TID_MASK;

    head = &hb->chain;

    plist_for_each_entry_safe(this, next, head, list) {
        if (match_futex(&this->key, key)) {
            /*
             * Another waiter already exists - bump up
             * the refcount and return its pi_state:
             */
            pi_state = this->pi_state;
            /*
             * Userspace might have messed up non-PI and PI futexes
             */
            if (unlikely(!pi_state))
                return -EINVAL;

            WARN_ON(!atomic_read(&pi_state->refcount));

            /*
             * When pi_state->owner is NULL then the owner died
             * and another waiter is still in flight. pi_state->owner
             * is fixed up by the task which acquires
             * pi_state->pi_mutex.
             *
             * We do not check for pid == 0 which can happen when
             * the owner died and robust_list_exit() cleared the
             * TID.
             */
            if (pid && pi_state->owner) {
                /*
                 * Bail out if user space manipulated the
                 * futex value.
                 */
                if (pid != task_pid_vnr(pi_state->owner))
                    return -EINVAL;
            }

            atomic_inc(&pi_state->refcount);
            *ps = pi_state;

            return 0;
        }
    }

    /*
     * We are the first waiter - try to look up the real owner and attach
     * the new pi_state to it, but bail out when TID = 0
     */
    if (!pid)
        return -ESRCH;
    p = futex_find_get_task(pid);
    if (!p)
        return -ESRCH;

    /*
     * We need to look at the task state flags to figure out
     * whether the task is exiting. To protect against do_exit()
     * changing the task flags, we do this under p->pi_lock:
     */
    raw_spin_lock_irq(&p->pi_lock);
    if (unlikely(p->flags & PF_EXITING)) {
        /*
         * The task is on the way out. When PF_EXITPIDONE is
         * set, we know that the task has finished the
         * cleanup:
         */
        int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

        raw_spin_unlock_irq(&p->pi_lock);
        put_task_struct(p);
        return ret;
    }

    pi_state = alloc_pi_state();

    /*
     * Initialize the pi_mutex in locked state and make 'p'
     * the owner of it:
     */
    rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

    /* Store the key for possible exit cleanups: */
    pi_state->key = *key;

    WARN_ON(!list_empty(&pi_state->list));
    list_add(&pi_state->list, &p->pi_state_list);
    pi_state->owner = p;
    raw_spin_unlock_irq(&p->pi_lock);

    put_task_struct(p);

    *ps = pi_state;

    return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr: the pi futex user address
 * @hb: the pi futex hash bucket
 * @key: the futex key associated with uaddr and hb
 * @ps: the pi_state pointer where we store the result of the
 * lookup
 * @task: the task to perform the atomic lock work for. This will
 * be "current" except in the case of requeue pi.
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 * 0 - ready to wait
 * 1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                union futex_key *key,
                struct futex_pi_state **ps,
                struct task_struct *task, int set_waiters)
{
    int lock_taken, ret, ownerdied = 0;
    u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
    ret = lock_taken = 0;

    /*
     * To avoid races, we attempt to take the lock here again
     * (by doing a 0 -> TID atomic cmpxchg), while holding all
     * the locks. It will most likely not succeed.
     */
    newval = vpid;
    if (set_waiters)
        newval |= FUTEX_WAITERS;

    if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
        return -EFAULT;

    /*
     * Detect deadlocks.
     */
    if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
        return -EDEADLK;

    /*
     * Surprise - we got the lock. Just return to userspace:
     */
    if (unlikely(!curval))
        return 1;

    uval = curval;

    /*
     * Set the FUTEX_WAITERS flag, so the owner will know it has someone
     * to wake at the next unlock.
     */
    newval = curval | FUTEX_WAITERS;

    /*
     * There are two cases where a futex might have no owner: the TID
     * field is 0 because the owner died and robust cleanup cleared it,
     * or we saw FUTEX_OWNER_DIED on an earlier pass (ownerdied set).
     * In both cases we take over the futex unconditionally.
     *
     * This is safe as we are protected by the hash bucket lock!
     */
    if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
        /* Keep the OWNER_DIED bit */
        newval = (curval & ~FUTEX_TID_MASK) | vpid;
        ownerdied = 0;
        lock_taken = 1;
    }

    if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
        return -EFAULT;
    if (unlikely(curval != uval))
        goto retry;

    /*
     * We took the lock via the owner-died takeover.
     */
    if (unlikely(lock_taken))
        return 1;

    /*
     * We don't have the lock. Look up the PI state (or create it if
     * we are the first waiter):
     */
    ret = lookup_pi_state(uval, hb, key, ps);

    if (unlikely(ret)) {
        switch (ret) {
        case -ESRCH:
            /*
             * No owner found for this futex. Check if the
             * OWNER_DIED bit is set to figure out whether
             * this is a robust futex or not.
             */
            if (get_futex_value_locked(&curval, uaddr))
                return -EFAULT;

            /*
             * We simply start over in case of a robust
             * futex. The code above will take the futex
             * and return happy.
             */
            if (curval & FUTEX_OWNER_DIED) {
                ownerdied = 1;
                goto retry;
            }
        default:
            break;
        }
    }

    return ret;
}
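
/*
 * For reference, the userspace fast path this function races with looks
 * roughly like this (a minimal sketch, not the glibc implementation;
 * 'futex' is the PI mutex word and gettid() returns the caller's TID):
 *
 *	lock:	if (cmpxchg(&futex, 0, gettid()) != 0)
 *			syscall(SYS_futex, &futex, FUTEX_LOCK_PI, 0, ...);
 *	unlock:	if (cmpxchg(&futex, gettid(), 0) != gettid())
 *			syscall(SYS_futex, &futex, FUTEX_UNLOCK_PI, ...);
 *
 * The kernel only ever sees the contended cases, which is why the
 * 0 -> TID cmpxchg at the top of this function "will most likely not
 * succeed".
 */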

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q: The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
    struct futex_hash_bucket *hb;

    if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
        || WARN_ON(plist_node_empty(&q->list)))
        return;

    hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
    plist_del(&q->list, &hb->chain);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
    struct task_struct *p = q->task;

    /*
     * We set q->lock_ptr = NULL _before_ we wake up the task. If
     * a non-futex wake up happens on another CPU then the task
     * might exit and p would dereference a non-existing task
     * struct. Prevent this by holding a reference on p across the
     * wake up.
     */
    get_task_struct(p);

    __unqueue_futex(q);
    /*
     * The waiting task can free the futex_q as soon as
     * q->lock_ptr = NULL is written, without taking any locks. A
     * memory barrier is required here to prevent the following
     * store to lock_ptr from getting ahead of the plist_del.
     */
    smp_wmb();
    q->lock_ptr = NULL;

    wake_up_state(p, TASK_NORMAL);
    put_task_struct(p);
}
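
/*
 * The waiter side pairs with the barrier above in unqueue_me(), which
 * reads q->lock_ptr locklessly. A sketch of the interleaving:
 *
 *	waker (here)			waiter (unqueue_me)
 *	plist_del(&q->list, ...);
 *	smp_wmb();
 *	q->lock_ptr = NULL;		lock_ptr = q->lock_ptr;
 *					if (!lock_ptr)
 *						return 0;   q may be freed now
 *
 * Without the wmb(), the NULL store could be observed before the
 * plist_del(), and the waiter could free a futex_q still on the list.
 */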

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
    struct task_struct *new_owner;
    struct futex_pi_state *pi_state = this->pi_state;
    u32 curval, newval;

    if (!pi_state)
        return -EINVAL;

    /*
     * If current does not own the pi_state then the futex is
     * inconsistent and user space fiddled with the futex value.
     */
    if (pi_state->owner != current)
        return -EINVAL;

    raw_spin_lock(&pi_state->pi_mutex.wait_lock);
    new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

    /*
     * It is possible that the next waiter (the one that brought
     * this owner to the kernel) timed out and is no longer
     * waiting on the lock.
     */
    if (!new_owner)
        new_owner = this->task;

    /*
     * We pass it to the next owner. (The WAITERS bit is always
     * kept enabled while there is PI state around. We must also
     * preserve the owner died bit.)
     */
    if (!(uval & FUTEX_OWNER_DIED)) {
        int ret = 0;

        newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

        if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
            ret = -EFAULT;
        else if (curval != uval)
            ret = -EINVAL;
        if (ret) {
            raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
            return ret;
        }
    }

    raw_spin_lock_irq(&pi_state->owner->pi_lock);
    WARN_ON(list_empty(&pi_state->list));
    list_del_init(&pi_state->list);
    raw_spin_unlock_irq(&pi_state->owner->pi_lock);

    raw_spin_lock_irq(&new_owner->pi_lock);
    WARN_ON(!list_empty(&pi_state->list));
    list_add(&pi_state->list, &new_owner->pi_state_list);
    pi_state->owner = new_owner;
    raw_spin_unlock_irq(&new_owner->pi_lock);

    raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
    rt_mutex_unlock(&pi_state->pi_mutex);

    return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
    u32 oldval;

    /*
     * There is no waiter, so we unlock the futex. The owner-died
     * bit need not be preserved here. We are the owner:
     */
    if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
        return -EFAULT;
    if (oldval != uval)
        return -EAGAIN;

    return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
    if (hb1 <= hb2) {
        spin_lock(&hb1->lock);
        if (hb1 < hb2)
            spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
    } else { /* hb1 > hb2 */
        spin_lock(&hb2->lock);
        spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
    }
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
    spin_unlock(&hb1->lock);
    if (hb1 != hb2)
        spin_unlock(&hb2->lock);
}
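
/*
 * Why the address ordering matters (an illustrative sketch, not code
 * from this file): if CPU0 requeues from A to B while CPU1 requeues
 * from B to A, both CPUs take the lower-addressed bucket lock first,
 * so no lock cycle can form:
 *
 *	CPU0: spin_lock(&hb_lo->lock); spin_lock_nested(&hb_hi->lock, ...);
 *	CPU1: spin_lock(&hb_lo->lock);	blocks here, never holds hb_hi
 *
 * The hb1 == hb2 case takes the lock only once, hence the hb1 < hb2
 * check before the nested lock.
 */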

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
    struct futex_hash_bucket *hb;
    struct futex_q *this, *next;
    struct plist_head *head;
    union futex_key key = FUTEX_KEY_INIT;
    int ret;

    if (!bitset)
        return -EINVAL;

    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
    if (unlikely(ret != 0))
        goto out;

    hb = hash_futex(&key);
    spin_lock(&hb->lock);
    head = &hb->chain;

    plist_for_each_entry_safe(this, next, head, list) {
        if (match_futex(&this->key, &key)) {
            if (this->pi_state || this->rt_waiter) {
                ret = -EINVAL;
                break;
            }

            /* Check if one of the bits is set in both bitsets */
            if (!(this->bitset & bitset))
                continue;

            wake_futex(this);
            if (++ret >= nr_wake)
                break;
        }
    }

    spin_unlock(&hb->lock);
    put_futex_key(&key);
out:
    return ret;
}
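
/*
 * The userspace counterpart, as a minimal sketch (error handling and
 * adaptive spinning omitted; the value protocol is up to the caller):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long fwait(int *uaddr, int val)
 *	{
 *		return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
 *	}
 *	static long fwake(int *uaddr, int nr)
 *	{
 *		return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr, NULL, NULL, 0);
 *	}
 *
 * fwait() blocks only while *uaddr == val, matching the ordering
 * guarantee documented at futex_wait_setup() below.
 */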

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
          int nr_wake, int nr_wake2, int op)
{
    union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
    struct futex_hash_bucket *hb1, *hb2;
    struct plist_head *head;
    struct futex_q *this, *next;
    int ret, op_ret;

retry:
    ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
    if (unlikely(ret != 0))
        goto out;
    ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
    if (unlikely(ret != 0))
        goto out_put_key1;

    hb1 = hash_futex(&key1);
    hb2 = hash_futex(&key2);

retry_private:
    double_lock_hb(hb1, hb2);
    op_ret = futex_atomic_op_inuser(op, uaddr2);
    if (unlikely(op_ret < 0)) {

        double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
        /*
         * we don't get EFAULT from MMU faults if we don't have an MMU,
         * but we might get them from range checking
         */
        ret = op_ret;
        goto out_put_keys;
#endif

        if (unlikely(op_ret != -EFAULT)) {
            ret = op_ret;
            goto out_put_keys;
        }

        ret = fault_in_user_writeable(uaddr2);
        if (ret)
            goto out_put_keys;

        if (!(flags & FLAGS_SHARED))
            goto retry_private;

        put_futex_key(&key2);
        put_futex_key(&key1);
        goto retry;
    }

    head = &hb1->chain;

    plist_for_each_entry_safe(this, next, head, list) {
        if (match_futex(&this->key, &key1)) {
            wake_futex(this);
            if (++ret >= nr_wake)
                break;
        }
    }

    if (op_ret > 0) {
        head = &hb2->chain;

        op_ret = 0;
        plist_for_each_entry_safe(this, next, head, list) {
            if (match_futex(&this->key, &key2)) {
                wake_futex(this);
                if (++op_ret >= nr_wake2)
                    break;
            }
        }
        ret += op_ret;
    }

    double_unlock_hb(hb1, hb2);
out_put_keys:
    put_futex_key(&key2);
out_put_key1:
    put_futex_key(&key1);
out:
    return ret;
}
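
/*
 * The 'op' argument packs an operation on *uaddr2 together with a
 * comparison against its old value. A sketch using the uapi helpers
 * from <linux/futex.h>:
 *
 *	op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
 *
 * atomically performs "oldval = *uaddr2; *uaddr2 += 1;", wakes up to
 * nr_wake waiters on uaddr1, and additionally wakes up to nr_wake2
 * waiters on uaddr2 if (oldval > 0) held. The operation was added for
 * glibc's condition variable implementation to save a syscall on the
 * signal path.
 */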

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q: the futex_q to requeue
 * @hb1: the source hash_bucket
 * @hb2: the target hash_bucket
 * @key2: the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
           struct futex_hash_bucket *hb2, union futex_key *key2)
{

    /*
     * If key1 and key2 hash to the same bucket, no need to
     * requeue.
     */
    if (likely(&hb1->chain != &hb2->chain)) {
        plist_del(&q->list, &hb1->chain);
        plist_add(&q->list, &hb2->chain);
        q->lock_ptr = &hb2->lock;
    }
    get_futex_key_refs(key2);
    q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q: the futex_q
 * @key: the key of the requeue target futex
 * @hb: the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
               struct futex_hash_bucket *hb)
{
    get_futex_key_refs(key);
    q->key = *key;

    __unqueue_futex(q);

    WARN_ON(!q->rt_waiter);
    q->rt_waiter = NULL;

    q->lock_ptr = &hb->lock;

    wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex: the user address of the to futex
 * @hb1: the from futex hash bucket, must be locked by the caller
 * @hb2: the to futex hash bucket, must be locked by the caller
 * @key1: the from futex key
 * @key2: the to futex key
 * @ps: address to store the pi_state pointer
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 * 0 - failed to acquire the lock atomically
 * 1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
                 struct futex_hash_bucket *hb1,
                 struct futex_hash_bucket *hb2,
                 union futex_key *key1, union futex_key *key2,
                 struct futex_pi_state **ps, int set_waiters)
{
    struct futex_q *top_waiter = NULL;
    u32 curval;
    int ret;

    if (get_futex_value_locked(&curval, pifutex))
        return -EFAULT;

    /*
     * Find the top_waiter and determine if there are additional waiters.
     * If the caller intends to requeue more than 1 waiter to pifutex,
     * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
     * as we have means to handle the possible fault. If not, don't set
     * the bit unnecessarily as it will force the subsequent unlock to enter
     * the kernel.
     */
    top_waiter = futex_top_waiter(hb1, key1);

    /* There are no waiters, nothing for us to do. */
    if (!top_waiter)
        return 0;

    /* Ensure we requeue to the expected futex. */
    if (!match_futex(top_waiter->requeue_pi_key, key2))
        return -EINVAL;

    /*
     * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
     * the contended case or if set_waiters is 1. The pi_state is returned
     * in ps in contended cases.
     */
    ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                   set_waiters);
    if (ret == 1)
        requeue_pi_wake_futex(top_waiter, key2, hb2);

    return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1: source futex user address
 * @flags: futex flags (FLAGS_SHARED, etc.)
 * @uaddr2: target futex user address
 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 * @cmpval: @uaddr1 expected value (or %NULL)
 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 * pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 * <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
             u32 __user *uaddr2, int nr_wake, int nr_requeue,
             u32 *cmpval, int requeue_pi)
{
    union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
    int drop_count = 0, task_count = 0, ret;
    struct futex_pi_state *pi_state = NULL;
    struct futex_hash_bucket *hb1, *hb2;
    struct plist_head *head1;
    struct futex_q *this, *next;
    u32 curval2;

    if (requeue_pi) {
        /*
         * requeue_pi requires a pi_state, try to allocate it now
         * without any locks in case it fails.
         */
        if (refill_pi_state_cache())
            return -ENOMEM;
        /*
         * requeue_pi must wake as many tasks as it can, up to nr_wake
         * + nr_requeue, since it acquires the rt_mutex prior to
         * returning to userspace, so as to not leave the rt_mutex with
         * waiters and no owner. However, second and third wake-ups
         * cannot be predicted as they involve race conditions with the
         * first wake and a fault while looking up the pi_state. Both
         * pthread_cond_signal() and pthread_cond_broadcast() should
         * use nr_wake=1.
         */
        if (nr_wake != 1)
            return -EINVAL;
    }

retry:
    if (pi_state != NULL) {
        /*
         * We will have to look up the pi_state again, so free this one
         * to keep the accounting correct.
         */
        free_pi_state(pi_state);
        pi_state = NULL;
    }

    ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
    if (unlikely(ret != 0))
        goto out;
    ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
    if (unlikely(ret != 0))
        goto out_put_key1;

    hb1 = hash_futex(&key1);
    hb2 = hash_futex(&key2);

retry_private:
    double_lock_hb(hb1, hb2);

    if (likely(cmpval != NULL)) {
        u32 curval;

        ret = get_futex_value_locked(&curval, uaddr1);

        if (unlikely(ret)) {
            double_unlock_hb(hb1, hb2);

            ret = get_user(curval, uaddr1);
            if (ret)
                goto out_put_keys;

            if (!(flags & FLAGS_SHARED))
                goto retry_private;

            put_futex_key(&key2);
            put_futex_key(&key1);
            goto retry;
        }
        if (curval != *cmpval) {
            ret = -EAGAIN;
            goto out_unlock;
        }
    }

    if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
        /*
         * Attempt to acquire uaddr2 and wake the top waiter. If we
         * intend to requeue waiters, force setting the FUTEX_WAITERS
         * bit. We force this here, where we can easily handle faults,
         * rather than in the requeue loop below.
         */
        ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
                         &key2, &pi_state, nr_requeue);

        /*
         * At this point the top_waiter has either taken uaddr2 or is
         * waiting on it. If the former, then the pi_state will not
         * exist yet, look it up one more time to ensure we have a
         * reference to it.
         */
        if (ret == 1) {
            WARN_ON(pi_state);
            drop_count++;
            task_count++;
            ret = get_futex_value_locked(&curval2, uaddr2);
            if (!ret)
                ret = lookup_pi_state(curval2, hb2, &key2,
                              &pi_state);
        }

        switch (ret) {
        case 0:
            break;
        case -EFAULT:
            double_unlock_hb(hb1, hb2);
            put_futex_key(&key2);
            put_futex_key(&key1);
            ret = fault_in_user_writeable(uaddr2);
            if (!ret)
                goto retry;
            goto out;
        case -EAGAIN:
            /* The owner was exiting, try again. */
            double_unlock_hb(hb1, hb2);
            put_futex_key(&key2);
            put_futex_key(&key1);
            cond_resched();
            goto retry;
        default:
            goto out_unlock;
        }
    }

    head1 = &hb1->chain;
    plist_for_each_entry_safe(this, next, head1, list) {
        if (task_count - nr_wake >= nr_requeue)
            break;

        if (!match_futex(&this->key, &key1))
            continue;

        /*
         * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
         * be paired with each other and no other futex ops.
         */
        if ((requeue_pi && !this->rt_waiter) ||
            (!requeue_pi && this->rt_waiter)) {
            ret = -EINVAL;
            break;
        }

        /*
         * Wake nr_wake waiters. For requeue_pi, if we acquired the
         * lock, we already woke the top_waiter. If not, it will be
         * woken by futex_unlock_pi().
         */
        if (++task_count <= nr_wake && !requeue_pi) {
            wake_futex(this);
            continue;
        }

        /* Ensure we requeue to the expected futex for requeue_pi. */
        if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
            ret = -EINVAL;
            break;
        }

        /*
         * Requeue nr_requeue waiters and possibly one more in the case
         * of requeue_pi if we couldn't acquire the lock atomically.
         */
        if (requeue_pi) {
            /* Prepare the waiter to take the rt_mutex. */
            atomic_inc(&pi_state->refcount);
            this->pi_state = pi_state;
            ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
                            this->rt_waiter,
                            this->task, 1);
            if (ret == 1) {
                /* We got the lock. */
                requeue_pi_wake_futex(this, &key2, hb2);
                drop_count++;
                continue;
            } else if (ret) {
                /* -EDEADLK */
                this->pi_state = NULL;
                free_pi_state(pi_state);
                goto out_unlock;
            }
        }
        requeue_futex(this, hb1, hb2, &key2);
        drop_count++;
    }

out_unlock:
    double_unlock_hb(hb1, hb2);

    /*
     * drop_futex_key_refs() must be called outside the spinlocks. During
     * the requeue we moved futex_q's from the hash bucket at key1 to the
     * one at key2 and updated their key pointer. We no longer need to
     * hold the references to key1.
     */
    while (--drop_count >= 0)
        drop_futex_key_refs(&key1);

out_put_keys:
    put_futex_key(&key2);
out_put_key1:
    put_futex_key(&key1);
out:
    if (pi_state != NULL)
        free_pi_state(pi_state);
    return ret ? ret : task_count;
}
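
/*
 * The canonical user of requeueing is condition variable broadcast (an
 * intent sketch, not glibc source; 'cond_seq' and 'mutex_word' are
 * hypothetical names): wake one waiter and move the rest to the mutex
 * so they don't all stampede at once. The nr_requeue value travels in
 * the otherwise unused timeout argument slot:
 *
 *	syscall(SYS_futex, &cond_seq, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(long)INT_MAX, &mutex_word, cond_seq_val);
 *
 * FUTEX_CMP_REQUEUE_PI works the same way, except that uaddr2 must be a
 * PI futex and nr_wake must be 1, as enforced at the top of this
 * function.
 */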

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
    __acquires(&hb->lock)
{
    struct futex_hash_bucket *hb;

    hb = hash_futex(&q->key);
    q->lock_ptr = &hb->lock;

    spin_lock(&hb->lock);
    return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
    __releases(&hb->lock)
{
    spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q: The futex_q to enqueue
 * @hb: The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me(). The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the
 * unqueue state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
    __releases(&hb->lock)
{
    int prio;

    /*
     * The priority used to register this element is
     * - either the real thread-priority for the real-time threads
     * (i.e. threads with a priority lower than MAX_RT_PRIO)
     * - or MAX_RT_PRIO for non-RT threads.
     * Thus, all RT-threads are woken first in priority order, and
     * the others are woken last, in FIFO order.
     */
    prio = min(current->normal_prio, MAX_RT_PRIO);

    plist_node_init(&q->list, prio);
    plist_add(&q->list, &hb->chain);
    q->task = current;
    spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q: The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 * 1 - if the futex_q was still queued (and we removed it)
 * 0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
    spinlock_t *lock_ptr;
    int ret = 0;

    /* In the common case we don't take the spinlock, which is nice. */
retry:
    lock_ptr = q->lock_ptr;
    barrier();
    if (lock_ptr != NULL) {
        spin_lock(lock_ptr);
        /*
         * q->lock_ptr can change between reading it and
         * spin_lock(), causing us to take the wrong lock. This
         * corrects the race condition.
         *
         * Reasoning goes like this: if we have the wrong lock,
         * q->lock_ptr must have changed (maybe several times)
         * between reading it and the spin_lock(). It can
         * change again after the spin_lock() but only if it was
         * already changed before the spin_lock(). It cannot,
         * however, change back to the original value. Therefore
         * we can detect whether we acquired the correct lock.
         */
        if (unlikely(lock_ptr != q->lock_ptr)) {
            spin_unlock(lock_ptr);
            goto retry;
        }
        __unqueue_futex(q);

        BUG_ON(q->pi_state);

        spin_unlock(lock_ptr);
        ret = 1;
    }

    drop_futex_key_refs(&q->key);
    return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
    __releases(q->lock_ptr)
{
    __unqueue_futex(q);

    BUG_ON(!q->pi_state);
    free_pi_state(q->pi_state);
    q->pi_state = NULL;

    spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                struct task_struct *newowner)
{
    u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
    struct futex_pi_state *pi_state = q->pi_state;
    struct task_struct *oldowner = pi_state->owner;
    u32 uval, curval, newval;
    int ret;

    /* Owner died? */
    if (!pi_state->owner)
        newtid |= FUTEX_OWNER_DIED;

    /*
     * We are here either because we stole the rtmutex from the
     * previous highest priority waiter or we are the highest priority
     * waiter but failed to get the rtmutex the first time.
     * We have to replace the newowner TID in the user space variable.
     * This must be atomic as we have to preserve the owner died bit here.
     *
     * Note: We write the user space value _before_ changing the pi_state
     * because we can fault here. Imagine swapped-out pages or a fork
     * that marked all the anonymous memory read-only for COW.
     *
     * Modifying pi_state _before_ the user space value would
     * leave the pi_state in an inconsistent state when we fault
     * here, because we need to drop the hash bucket lock to
     * handle the fault. This might be observed in the PID check
     * in lookup_pi_state.
     */
retry:
    if (get_futex_value_locked(&uval, uaddr))
        goto handle_fault;

    while (1) {
        newval = (uval & FUTEX_OWNER_DIED) | newtid;

        if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
            goto handle_fault;
        if (curval == uval)
            break;
        uval = curval;
    }

    /*
     * We fixed up user space. Now we need to fix the pi_state
     * itself.
     */
    if (pi_state->owner != NULL) {
        raw_spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        raw_spin_unlock_irq(&pi_state->owner->pi_lock);
    }

    pi_state->owner = newowner;

    raw_spin_lock_irq(&newowner->pi_lock);
    WARN_ON(!list_empty(&pi_state->list));
    list_add(&pi_state->list, &newowner->pi_state_list);
    raw_spin_unlock_irq(&newowner->pi_lock);
    return 0;

    /*
     * To handle the page fault we need to drop the hash bucket
     * lock here. That gives the other task (either the highest priority
     * waiter itself or the task which stole the rtmutex) the
     * chance to try the fixup of the pi_state. So once we are
     * back from handling the fault we need to check the pi_state
     * after reacquiring the hash bucket lock and before trying to
     * do another fixup. When the fixup has been done already we
     * simply return.
     */
handle_fault:
    spin_unlock(q->lock_ptr);

    ret = fault_in_user_writeable(uaddr);

    spin_lock(q->lock_ptr);

    /*
     * Check if someone else fixed it for us:
     */
    if (pi_state->owner != oldowner)
        return 0;

    if (ret)
        return ret;

    goto retry;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr: user address of the futex
 * @q: futex_q (contains pi_state and access to the rt_mutex)
 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 * 1 - success, lock taken
 * 0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
    struct task_struct *owner;
    int ret = 0;

    if (locked) {
        /*
         * Got the lock. We might not be the anticipated owner if we
         * did a lock-steal - fix up the PI-state in that case:
         */
        if (q->pi_state->owner != current)
            ret = fixup_pi_state_owner(uaddr, q, current);
        goto out;
    }

    /*
     * Catch the rare case where the lock was released while we were on
     * the way back, before we locked the hash bucket.
     */
    if (q->pi_state->owner == current) {
        /*
         * Try to get the rt_mutex now. This might fail as some other
         * task acquired the rt_mutex after we removed ourselves from
         * the rt_mutex waiters list.
         */
1677        if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1678            locked = 1;
1679            goto out;
1680        }
1681
1682        /*
1683         * pi_state is incorrect, some other task did a lock steal and
1684         * we returned due to timeout or signal without taking the
1685         * rt_mutex. Too late.
1686         */
1687        raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1688        owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1689        if (!owner)
1690            owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
1691        raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1692        ret = fixup_pi_state_owner(uaddr, q, owner);
1693        goto out;
1694    }
1695
1696    /*
1697     * Paranoia check. If we did not take the lock, then we should not be
1698     * the owner of the rt_mutex.
1699     */
1700    if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1701        printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1702                "pi-state %p\n", ret,
1703                q->pi_state->pi_mutex.owner,
1704                q->pi_state->owner);
1705
1706out:
1707    return ret ? ret : locked;
1708}
1709
1710/**
1711 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1712 * @hb: the futex hash bucket, must be locked by the caller
1713 * @q: the futex_q to queue up on
1714 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
1715 */
1716static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1717                struct hrtimer_sleeper *timeout)
1718{
1719    /*
1720     * The task state is guaranteed to be set before another task can
1721     * wake it. set_current_state() is implemented using set_mb() and
1722     * queue_me() calls spin_unlock() upon completion, both serializing
1723     * access to the hash list and forcing another memory barrier.
1724     */
1725    set_current_state(TASK_INTERRUPTIBLE);
1726    queue_me(q, hb);
1727
1728    /* Arm the timer */
1729    if (timeout) {
1730        hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1731        if (!hrtimer_active(&timeout->timer))
1732            timeout->task = NULL;
1733    }
1734
1735    /*
1736     * If we have been removed from the hash list, then another task
1737     * has tried to wake us, and we can skip the call to schedule().
1738     */
1739    if (likely(!plist_node_empty(&q->list))) {
1740        /*
1741         * If the timer has already expired, current will already be
1742         * flagged for rescheduling. Only call schedule if there
1743         * is no timeout, or if it has yet to expire.
1744         */
1745        if (!timeout || timeout->task)
1746            schedule();
1747    }
1748    __set_current_state(TASK_RUNNING);
1749}
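
/*
 * Illustrative interleaving (a sketch, not kernel code) of the lost
 * wakeup that the state-then-queue ordering above prevents. If the task
 * state were set only after queue_me() dropped the hb lock, a waker
 * running in between could find the waiter still TASK_RUNNING and its
 * wakeup would be a no-op:
 *
 *   waiter                                waker
 *   ------                                -----
 *   queue_me(q, hb);
 *                                         futex_wake()
 *                                           -> wakeup sees TASK_RUNNING,
 *                                              does nothing
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   schedule();    <- sleeps with the wakeup already consumed
 *
 * Setting the state first, with queue_me()'s spin_unlock() providing the
 * barrier, closes this window.
 */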
1750
1751/**
1752 * futex_wait_setup() - Prepare to wait on a futex
1753 * @uaddr: the futex userspace address
1754 * @val: the expected value
1755 * @flags: futex flags (FLAGS_SHARED, etc.)
1756 * @q: the associated futex_q
1757 * @hb: storage for hash_bucket pointer to be returned to caller
1758 *
1759 * Setup the futex_q and locate the hash_bucket. Get the futex value and
1760 * compare it with the expected value. Handle atomic faults internally.
1761 * Return with the hb lock held and a q.key reference on success, and unlocked
1762 * with no q.key reference on failure.
1763 *
1764 * Returns:
1765 * 0 - uaddr contains val and hb has been locked
1766 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1767 */
1768static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1769               struct futex_q *q, struct futex_hash_bucket **hb)
1770{
1771    u32 uval;
1772    int ret;
1773
1774    /*
1775     * Access the page AFTER the hash-bucket is locked.
1776     * Order is important:
1777     *
1778     * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1779     * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
1780     *
1781     * The basic logical guarantee of a futex is that it blocks ONLY
1782     * if cond(var) is known to be true at the time of blocking, for
1783     * any cond. If we locked the hash-bucket after testing *uaddr, that
1784     * would open a race condition where we could block indefinitely with
1785     * cond(var) false, which would violate the guarantee.
1786     *
1787     * On the other hand, we insert q and release the hash-bucket only
1788     * after testing *uaddr. This guarantees that futex_wait() will NOT
1789     * absorb a wakeup if *uaddr does not match the desired value
1790     * while the syscall executes. (See the sketch after this function.)
1791     */
1792retry:
1793    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
1794    if (unlikely(ret != 0))
1795        return ret;
1796
1797retry_private:
1798    *hb = queue_lock(q);
1799
1800    ret = get_futex_value_locked(&uval, uaddr);
1801
1802    if (ret) {
1803        queue_unlock(q, *hb);
1804
1805        ret = get_user(uval, uaddr);
1806        if (ret)
1807            goto out;
1808
1809        if (!(flags & FLAGS_SHARED))
1810            goto retry_private;
1811
1812        put_futex_key(&q->key);
1813        goto retry;
1814    }
1815
1816    if (uval != val) {
1817        queue_unlock(q, *hb);
1818        ret = -EWOULDBLOCK;
1819    }
1820
1821out:
1822    if (ret)
1823        put_futex_key(&q->key);
1824    return ret;
1825}
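
/*
 * Illustrative userspace counterpart (a hedged sketch; the futex_word
 * variable and raw syscall(2) usage are assumptions for illustration,
 * not mandated by this file):
 *
 *   // waiter
 *   u32 val = futex_word;
 *   if (val == EXPECTED)
 *       syscall(SYS_futex, &futex_word, FUTEX_WAIT, val, NULL, NULL, 0);
 *
 *   // waker
 *   futex_word = NEW_VAL;
 *   syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * futex_wait_setup() re-reads futex_word under the hb lock, so a waiter
 * racing with the waker gets -EWOULDBLOCK instead of sleeping on a
 * stale value.
 */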
1826
1827static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1828              ktime_t *abs_time, u32 bitset)
1829{
1830    struct hrtimer_sleeper timeout, *to = NULL;
1831    struct restart_block *restart;
1832    struct futex_hash_bucket *hb;
1833    struct futex_q q = futex_q_init;
1834    int ret;
1835
1836    if (!bitset)
1837        return -EINVAL;
1838    q.bitset = bitset;
1839
1840    if (abs_time) {
1841        to = &timeout;
1842
1843        hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1844                      CLOCK_REALTIME : CLOCK_MONOTONIC,
1845                      HRTIMER_MODE_ABS);
1846        hrtimer_init_sleeper(to, current);
1847        hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1848                         current->timer_slack_ns);
1849    }
1850
1851retry:
1852    /*
1853     * Prepare to wait on uaddr. On success, holds hb lock and increments
1854     * q.key refs.
1855     */
1856    ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
1857    if (ret)
1858        goto out;
1859
1860    /* queue_me and wait for wakeup, timeout, or a signal. */
1861    futex_wait_queue_me(hb, &q, to);
1862
1863    /* If we were woken (and unqueued), we succeeded, whatever. */
1864    ret = 0;
1865    /* unqueue_me() drops q.key ref */
1866    if (!unqueue_me(&q))
1867        goto out;
1868    ret = -ETIMEDOUT;
1869    if (to && !to->task)
1870        goto out;
1871
1872    /*
1873     * We expect signal_pending(current), but we might be the
1874     * victim of a spurious wakeup as well.
1875     */
1876    if (!signal_pending(current))
1877        goto retry;
1878
1879    ret = -ERESTARTSYS;
1880    if (!abs_time)
1881        goto out;
1882
1883    restart = &current_thread_info()->restart_block;
1884    restart->fn = futex_wait_restart;
1885    restart->futex.uaddr = uaddr;
1886    restart->futex.val = val;
1887    restart->futex.time = abs_time->tv64;
1888    restart->futex.bitset = bitset;
1889    restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1890
1891    ret = -ERESTART_RESTARTBLOCK;
1892
1893out:
1894    if (to) {
1895        hrtimer_cancel(&to->timer);
1896        destroy_hrtimer_on_stack(&to->timer);
1897    }
1898    return ret;
1899}
1900
1901
1902static long futex_wait_restart(struct restart_block *restart)
1903{
1904    u32 __user *uaddr = restart->futex.uaddr;
1905    ktime_t t, *tp = NULL;
1906
1907    if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1908        t.tv64 = restart->futex.time;
1909        tp = &t;
1910    }
1911    restart->fn = do_no_restart_syscall;
1912
1913    return (long)futex_wait(uaddr, restart->futex.flags,
1914                restart->futex.val, tp, restart->futex.bitset);
1915}
1916
1917
1918/*
1919 * Userspace tried a 0 -> TID atomic transition of the futex value
1920 * and failed. The kernel side here does the whole locking operation:
1921 * if there are waiters then it will block, it does PI, etc. (Due to
1922 * races the kernel might see a 0 value of the futex too.)
1923 */
1924static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1925             ktime_t *time, int trylock)
1926{
1927    struct hrtimer_sleeper timeout, *to = NULL;
1928    struct futex_hash_bucket *hb;
1929    struct futex_q q = futex_q_init;
1930    int res, ret;
1931
1932    if (refill_pi_state_cache())
1933        return -ENOMEM;
1934
1935    if (time) {
1936        to = &timeout;
1937        hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1938                      HRTIMER_MODE_ABS);
1939        hrtimer_init_sleeper(to, current);
1940        hrtimer_set_expires(&to->timer, *time);
1941    }
1942
1943retry:
1944    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
1945    if (unlikely(ret != 0))
1946        goto out;
1947
1948retry_private:
1949    hb = queue_lock(&q);
1950
1951    ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1952    if (unlikely(ret)) {
1953        switch (ret) {
1954        case 1:
1955            /* We got the lock. */
1956            ret = 0;
1957            goto out_unlock_put_key;
1958        case -EFAULT:
1959            goto uaddr_faulted;
1960        case -EAGAIN:
1961            /*
1962             * Task is exiting and we just wait for the
1963             * exit to complete.
1964             */
1965            queue_unlock(&q, hb);
1966            put_futex_key(&q.key);
1967            cond_resched();
1968            goto retry;
1969        default:
1970            goto out_unlock_put_key;
1971        }
1972    }
1973
1974    /*
1975     * Only actually queue now that the atomic ops are done:
1976     */
1977    queue_me(&q, hb);
1978
1979    WARN_ON(!q.pi_state);
1980    /*
1981     * Block on the PI mutex:
1982     */
1983    if (!trylock)
1984        ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1985    else {
1986        ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1987        /* Fixup the trylock return value: */
1988        ret = ret ? 0 : -EWOULDBLOCK;
1989    }
1990
1991    spin_lock(q.lock_ptr);
1992    /*
1993     * Fixup the pi_state owner and possibly acquire the lock if we
1994     * haven't already.
1995     */
1996    res = fixup_owner(uaddr, &q, !ret);
1997    /*
1998     * If fixup_owner() returned an error, propagate that. If it acquired
1999     * the lock, clear our -ETIMEDOUT or -EINTR.
2000     */
2001    if (res)
2002        ret = (res < 0) ? res : 0;
2003
2004    /*
2005     * If fixup_owner() faulted and was unable to handle the fault, unlock
2006     * it and return the fault to userspace.
2007     */
2008    if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2009        rt_mutex_unlock(&q.pi_state->pi_mutex);
2010
2011    /* Unqueue and drop the lock */
2012    unqueue_me_pi(&q);
2013
2014    goto out_put_key;
2015
2016out_unlock_put_key:
2017    queue_unlock(&q, hb);
2018
2019out_put_key:
2020    put_futex_key(&q.key);
2021out:
2022    if (to)
2023        destroy_hrtimer_on_stack(&to->timer);
2024    return ret != -EINTR ? ret : -ERESTARTNOINTR;
2025
2026uaddr_faulted:
2027    queue_unlock(&q, hb);
2028
2029    ret = fault_in_user_writeable(uaddr);
2030    if (ret)
2031        goto out_put_key;
2032
2033    if (!(flags & FLAGS_SHARED))
2034        goto retry_private;
2035
2036    put_futex_key(&q.key);
2037    goto retry;
2038}
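
/*
 * Illustrative userspace fast path (a sketch; atomic_cmpxchg() and the
 * variable names are assumptions) that this slowpath backs up. The
 * uncontended case never enters the kernel:
 *
 *   u32 tid = gettid();
 *   if (atomic_cmpxchg(&futex_word, 0, tid) != 0)
 *       syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, timeout,
 *               NULL, 0);
 *
 * On success the word holds the caller's TID, possibly with
 * FUTEX_WAITERS set by the kernel.
 */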
2039
2040/*
2041 * Userspace attempted a TID -> 0 atomic transition, and failed.
2042 * This is the in-kernel slowpath: we look up the PI state (if any),
2043 * and do the rt-mutex unlock.
2044 */
2045static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2046{
2047    struct futex_hash_bucket *hb;
2048    struct futex_q *this, *next;
2049    struct plist_head *head;
2050    union futex_key key = FUTEX_KEY_INIT;
2051    u32 uval, vpid = task_pid_vnr(current);
2052    int ret;
2053
2054retry:
2055    if (get_user(uval, uaddr))
2056        return -EFAULT;
2057    /*
2058     * We release only a lock we actually own:
2059     */
2060    if ((uval & FUTEX_TID_MASK) != vpid)
2061        return -EPERM;
2062
2063    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
2064    if (unlikely(ret != 0))
2065        goto out;
2066
2067    hb = hash_futex(&key);
2068    spin_lock(&hb->lock);
2069
2070    /*
2071     * To avoid races, try to do the TID -> 0 atomic transition
2072     * again. If it succeeds then we can return without waking
2073     * anyone else up:
2074     */
2075    if (!(uval & FUTEX_OWNER_DIED) &&
2076        cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2077        goto pi_faulted;
2078    /*
2079     * Rare case: we managed to release the lock atomically,
2080     * no need to wake anyone else up:
2081     */
2082    if (unlikely(uval == vpid))
2083        goto out_unlock;
2084
2085    /*
2086     * Ok, other tasks may need to be woken up - check waiters
2087     * and do the wakeup if necessary:
2088     */
2089    head = &hb->chain;
2090
2091    plist_for_each_entry_safe(this, next, head, list) {
2092        if (!match_futex(&this->key, &key))
2093            continue;
2094        ret = wake_futex_pi(uaddr, uval, this);
2095        /*
2096         * The atomic access to the futex value
2097         * generated a pagefault, so retry the
2098         * user-access and the wakeup:
2099         */
2100        if (ret == -EFAULT)
2101            goto pi_faulted;
2102        goto out_unlock;
2103    }
2104    /*
2105     * No waiters - kernel unlocks the futex:
2106     */
2107    if (!(uval & FUTEX_OWNER_DIED)) {
2108        ret = unlock_futex_pi(uaddr, uval);
2109        if (ret == -EFAULT)
2110            goto pi_faulted;
2111    }
2112
2113out_unlock:
2114    spin_unlock(&hb->lock);
2115    put_futex_key(&key);
2116
2117out:
2118    return ret;
2119
2120pi_faulted:
2121    spin_unlock(&hb->lock);
2122    put_futex_key(&key);
2123
2124    ret = fault_in_user_writeable(uaddr);
2125    if (!ret)
2126        goto retry;
2127
2128    return ret;
2129}
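
/*
 * Matching illustrative unlock fast path (same hedged assumptions as
 * the lock sketch above): only a failed TID -> 0 cmpxchg, i.e. a word
 * carrying FUTEX_WAITERS, forces the syscall:
 *
 *   if (atomic_cmpxchg(&futex_word, tid, 0) != tid)
 *       syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL,
 *               NULL, 0);
 */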
2130
2131/**
2132 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2133 * @hb: the hash_bucket futex_q was originally enqueued on
2134 * @q: the futex_q woken while waiting to be requeued
2135 * @key2: the futex_key of the requeue target futex
2136 * @timeout: the timeout associated with the wait (NULL if none)
2137 *
2138 * Detect if the task was woken on the initial futex as opposed to the requeue
2139 * target futex. If so, determine if it was a timeout or a signal that caused
2140 * the wakeup and return the appropriate error code to the caller. Must be
2141 * called with the hb lock held.
2142 *
2143 * Returns:
2144 * 0 - no early wakeup detected
2145 * <0 - -ETIMEDOUT, -ERESTARTNOINTR, or -EWOULDBLOCK (spurious wakeup)
2146 */
2147static inline
2148int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2149                   struct futex_q *q, union futex_key *key2,
2150                   struct hrtimer_sleeper *timeout)
2151{
2152    int ret = 0;
2153
2154    /*
2155     * With the hb lock held, we avoid races while we process the wakeup.
2156     * We only need to hold hb (and not hb2) to ensure atomicity as the
2157     * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2158     * It can't be requeued from uaddr2 to something else since we don't
2159     * support a PI aware source futex for requeue.
2160     */
2161    if (!match_futex(&q->key, key2)) {
2162        WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2163        /*
2164         * We were woken prior to requeue by a timeout or a signal.
2165         * Unqueue the futex_q and determine which it was.
2166         */
2167        plist_del(&q->list, &hb->chain);
2168
2169        /* Handle spurious wakeups gracefully */
2170        ret = -EWOULDBLOCK;
2171        if (timeout && !timeout->task)
2172            ret = -ETIMEDOUT;
2173        else if (signal_pending(current))
2174            ret = -ERESTARTNOINTR;
2175    }
2176    return ret;
2177}
2178
2179/**
2180 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2181 * @uaddr: the futex we initially wait on (non-pi)
2182 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); both futexes
2183 * must be of the same type (no requeueing from private to shared, etc.)
2184 * @val: the expected value of uaddr
2185 * @abs_time: absolute timeout
2186 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2188 * @uaddr2: the pi futex we will take prior to returning to user-space
2189 *
2190 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2191 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
2192 * complete the acquisition of the rt_mutex prior to returning to userspace.
2193 * This ensures the rt_mutex maintains an owner when it has waiters; without
2194 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2195 * need to.
2196 *
2197 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2198 * via the following:
2199 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2200 * 2) wakeup on uaddr2 after a requeue
2201 * 3) signal
2202 * 4) timeout
2203 *
2204 * If 3, cleanup and return -ERESTARTNOINTR.
2205 *
2206 * If 2, we may then block on trying to take the rt_mutex and return via:
2207 * 5) successful lock
2208 * 6) signal
2209 * 7) timeout
2210 * 8) other lock acquisition failure
2211 *
2212 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2213 *
2214 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2215 *
2216 * Returns:
2217 * 0 - On success
2218 * <0 - On error
2219 */
2220static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2221                 u32 val, ktime_t *abs_time, u32 bitset,
2222                 u32 __user *uaddr2)
2223{
2224    struct hrtimer_sleeper timeout, *to = NULL;
2225    struct rt_mutex_waiter rt_waiter;
2226    struct rt_mutex *pi_mutex = NULL;
2227    struct futex_hash_bucket *hb;
2228    union futex_key key2 = FUTEX_KEY_INIT;
2229    struct futex_q q = futex_q_init;
2230    int res, ret;
2231
2232    if (!bitset)
2233        return -EINVAL;
2234
2235    if (abs_time) {
2236        to = &timeout;
2237        hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2238                      CLOCK_REALTIME : CLOCK_MONOTONIC,
2239                      HRTIMER_MODE_ABS);
2240        hrtimer_init_sleeper(to, current);
2241        hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2242                         current->timer_slack_ns);
2243    }
2244
2245    /*
2246     * The waiter is allocated on our stack, manipulated by the requeue
2247     * code while we sleep on uaddr.
2248     */
2249    debug_rt_mutex_init_waiter(&rt_waiter);
2250    rt_waiter.task = NULL;
2251
2252    ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
2253    if (unlikely(ret != 0))
2254        goto out;
2255
2256    q.bitset = bitset;
2257    q.rt_waiter = &rt_waiter;
2258    q.requeue_pi_key = &key2;
2259
2260    /*
2261     * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2262     * count.
2263     */
2264    ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2265    if (ret)
2266        goto out_key2;
2267
2268    /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2269    futex_wait_queue_me(hb, &q, to);
2270
2271    spin_lock(&hb->lock);
2272    ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2273    spin_unlock(&hb->lock);
2274    if (ret)
2275        goto out_put_keys;
2276
2277    /*
2278     * In order for us to be here, we know our q.key == key2, and since
2279     * we took the hb->lock above, we also know that futex_requeue() has
2280     * completed and we no longer have to concern ourselves with a wakeup
2281     * race with the atomic proxy lock acquisition by the requeue code. The
2282     * futex_requeue dropped our key1 reference and incremented our key2
2283     * reference count.
2284     */
2285
2286    /* Check if the requeue code acquired the second futex for us. */
2287    if (!q.rt_waiter) {
2288        /*
2289         * Got the lock. We might not be the anticipated owner if we
2290         * did a lock-steal - fix up the PI-state in that case.
2291         */
2292        if (q.pi_state && (q.pi_state->owner != current)) {
2293            spin_lock(q.lock_ptr);
2294            ret = fixup_pi_state_owner(uaddr2, &q, current);
2295            spin_unlock(q.lock_ptr);
2296        }
2297    } else {
2298        /*
2299         * We have been woken up by futex_unlock_pi(), a timeout, or a
2300         * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2301         * the pi_state.
2302         */
2303        WARN_ON(!q.pi_state);
2304        pi_mutex = &q.pi_state->pi_mutex;
2305        ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2306        debug_rt_mutex_free_waiter(&rt_waiter);
2307
2308        spin_lock(q.lock_ptr);
2309        /*
2310         * Fixup the pi_state owner and possibly acquire the lock if we
2311         * haven't already.
2312         */
2313        res = fixup_owner(uaddr2, &q, !ret);
2314        /*
2315         * If fixup_owner() returned an error, propagate that. If it
2316         * acquired the lock, clear -ETIMEDOUT or -EINTR.
2317         */
2318        if (res)
2319            ret = (res < 0) ? res : 0;
2320
2321        /* Unqueue and drop the lock. */
2322        unqueue_me_pi(&q);
2323    }
2324
2325    /*
2326     * If fixup_pi_state_owner() faulted and was unable to handle the
2327     * fault, unlock the rt_mutex and return the fault to userspace.
2328     */
2329    if (ret == -EFAULT) {
2330        if (rt_mutex_owner(pi_mutex) == current)
2331            rt_mutex_unlock(pi_mutex);
2332    } else if (ret == -EINTR) {
2333        /*
2334         * We've already been requeued, but cannot restart by calling
2335         * futex_lock_pi() directly. We could restart this syscall, but
2336         * it would detect that the user space "val" changed and return
2337         * -EWOULDBLOCK. Save the overhead of the restart and return
2338         * -EWOULDBLOCK directly.
2339         */
2340        ret = -EWOULDBLOCK;
2341    }
2342
2343out_put_keys:
2344    put_futex_key(&q.key);
2345out_key2:
2346    put_futex_key(&key2);
2347
2348out:
2349    if (to) {
2350        hrtimer_cancel(&to->timer);
2351        destroy_hrtimer_on_stack(&to->timer);
2352    }
2353    return ret;
2354}
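
/*
 * Illustrative usage (a hedged sketch of a PI-aware condition variable
 * built on these two ops; the cond/mutex structures and helper names
 * are assumptions, not a description of any particular libc):
 *
 *   // waiter (cond_wait)
 *   u32 seq = cond->seq;
 *   pi_mutex_unlock(&cond->mutex);
 *   syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *           abs_timeout, &cond->mutex.futex_word, 0);
 *   // a 0 return means the rt_mutex was acquired: the waiter comes
 *   // back already owning cond->mutex
 *
 *   // waker (cond_signal/broadcast)
 *   syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *           (void *)(long)nr_requeue, &cond->mutex.futex_word, seq);
 */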
2355
2356/*
2357 * Support for robust futexes: the kernel cleans up held futexes at
2358 * thread exit time.
2359 *
2360 * Implementation: user-space maintains a per-thread list of locks it
2361 * is holding. Upon do_exit(), the kernel carefully walks this list,
2362 * and marks all locks that are owned by this thread with the
2363 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2364 * always manipulated with the lock held, so the list is private and
2365 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2366 * field, to allow the kernel to clean up if the thread dies after
2367 * acquiring the lock, but just before it could have added itself to
2368 * the list. There can only be one such pending lock.
2369 */
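
/*
 * Illustrative registration (a sketch: struct robust_list_head and the
 * syscall are real, struct my_lock and the init glue are assumptions):
 *
 *   struct my_lock { struct robust_list list; u32 futex_word; };
 *
 *   static __thread struct robust_list_head head = {
 *       .list = { .next = &head.list },     // empty circular list
 *       .futex_offset = offsetof(struct my_lock, futex_word)
 *                       - offsetof(struct my_lock, list),
 *       .list_op_pending = NULL,
 *   };
 *   syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Each acquired lock is linked into head.list; list_op_pending names
 * the lock being taken so a death in the pre-link window is still seen.
 */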
2370
2371/**
2372 * sys_set_robust_list() - Set the robust-futex list head of a task
2373 * @head: pointer to the list-head
2374 * @len: length of the list-head, as userspace expects
2375 */
2376SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2377        size_t, len)
2378{
2379    if (!futex_cmpxchg_enabled)
2380        return -ENOSYS;
2381    /*
2382     * The kernel knows only one size for now:
2383     */
2384    if (unlikely(len != sizeof(*head)))
2385        return -EINVAL;
2386
2387    current->robust_list = head;
2388
2389    return 0;
2390}
2391
2392/**
2393 * sys_get_robust_list() - Get the robust-futex list head of a task
2394 * @pid: pid of the process [zero for current task]
2395 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2396 * @len_ptr: pointer to a length field, the kernel fills in the header size
2397 */
2398SYSCALL_DEFINE3(get_robust_list, int, pid,
2399        struct robust_list_head __user * __user *, head_ptr,
2400        size_t __user *, len_ptr)
2401{
2402    struct robust_list_head __user *head;
2403    unsigned long ret;
2404    const struct cred *cred = current_cred(), *pcred;
2405
2406    if (!futex_cmpxchg_enabled)
2407        return -ENOSYS;
2408
2409    if (!pid)
2410        head = current->robust_list;
2411    else {
2412        struct task_struct *p;
2413
2414        ret = -ESRCH;
2415        rcu_read_lock();
2416        p = find_task_by_vpid(pid);
2417        if (!p)
2418            goto err_unlock;
2419        ret = -EPERM;
2420        pcred = __task_cred(p);
2421        /* If the victim is in a different user_ns, the uids are not
2422           comparable, so we must have CAP_SYS_PTRACE. */
2423        if (cred->user->user_ns != pcred->user->user_ns) {
2424            if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
2425                goto err_unlock;
2426            goto ok;
2427        }
2428        /* If victim is in same user_ns, then uids are comparable */
2429        if (cred->euid != pcred->euid &&
2430            cred->euid != pcred->uid &&
2431            !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
2432            goto err_unlock;
2433ok:
2434        head = p->robust_list;
2435        rcu_read_unlock();
2436    }
2437
2438    if (put_user(sizeof(*head), len_ptr))
2439        return -EFAULT;
2440    return put_user(head, head_ptr);
2441
2442err_unlock:
2443    rcu_read_unlock();
2444
2445    return ret;
2446}
2447
2448/*
2449 * Process a futex-list entry, check whether it's owned by the
2450 * dying task, and do notification if so:
2451 */
2452int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2453{
2454    u32 uval, nval, mval;
2455
2456retry:
2457    if (get_user(uval, uaddr))
2458        return -1;
2459
2460    if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2461        /*
2462         * Ok, this dying thread is truly holding a futex
2463         * of interest. Set the OWNER_DIED bit atomically
2464         * via cmpxchg, and if the value had FUTEX_WAITERS
2465         * set, wake up a waiter (if any). (We have to do a
2466         * futex_wake() even if OWNER_DIED is already set -
2467         * to handle the rare but possible case of recursive
2468         * thread-death.) The rest of the cleanup is done in
2469         * userspace.
2470         */
2471        mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2472        /*
2473         * We are not holding a lock here, but we want to have
2474         * the pagefault_disable/enable() protection because
2475         * we want to handle the fault gracefully. If the
2476         * access fails we try to fault in the futex with R/W
2477         * verification via get_user_pages. get_user() above
2478         * does not guarantee R/W access. If that fails we
2479         * give up and leave the futex locked.
2480         */
2481        if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2482            if (fault_in_user_writeable(uaddr))
2483                return -1;
2484            goto retry;
2485        }
2486        if (nval != uval)
2487            goto retry;
2488
2489        /*
2490         * Wake robust non-PI futexes here. The wakeup of
2491         * PI futexes happens in exit_pi_state():
2492         */
2493        if (!pi && (uval & FUTEX_WAITERS))
2494            futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2495    }
2496    return 0;
2497}
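
/*
 * For reference, the futex word manipulated above decomposes as follows
 * (constants from <linux/futex.h>):
 *
 *   FUTEX_WAITERS     0x80000000   there are kernel-side waiters
 *   FUTEX_OWNER_DIED  0x40000000   previous owner exited uncleanly
 *   FUTEX_TID_MASK    0x3fffffff   TID of the current owner
 *
 * so mval above keeps only FUTEX_WAITERS, sets FUTEX_OWNER_DIED and
 * clears the dead owner's TID.
 */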
2498
2499/*
2500 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2501 */
2502static inline int fetch_robust_entry(struct robust_list __user **entry,
2503                     struct robust_list __user * __user *head,
2504                     unsigned int *pi)
2505{
2506    unsigned long uentry;
2507
2508    if (get_user(uentry, (unsigned long __user *)head))
2509        return -EFAULT;
2510
2511    *entry = (void __user *)(uentry & ~1UL);
2512    *pi = uentry & 1;
2513
2514    return 0;
2515}
2516
2517/*
2518 * Walk curr->robust_list (very carefully, it's a userspace list!)
2519 * and mark any locks found there dead, and notify any waiters.
2520 *
2521 * We silently return on any sign of list-walking problem.
2522 */
2523void exit_robust_list(struct task_struct *curr)
2524{
2525    struct robust_list_head __user *head = curr->robust_list;
2526    struct robust_list __user *entry, *next_entry, *pending;
2527    unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2528    unsigned int uninitialized_var(next_pi);
2529    unsigned long futex_offset;
2530    int rc;
2531
2532    if (!futex_cmpxchg_enabled)
2533        return;
2534
2535    /*
2536     * Fetch the list head (which was registered earlier, via
2537     * sys_set_robust_list()):
2538     */
2539    if (fetch_robust_entry(&entry, &head->list.next, &pi))
2540        return;
2541    /*
2542     * Fetch the relative futex offset:
2543     */
2544    if (get_user(futex_offset, &head->futex_offset))
2545        return;
2546    /*
2547     * Fetch any possibly pending lock-add first, and handle it
2548     * if it exists:
2549     */
2550    if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2551        return;
2552
2553    next_entry = NULL; /* avoid warning with gcc */
2554    while (entry != &head->list) {
2555        /*
2556         * Fetch the next entry in the list before calling
2557         * handle_futex_death:
2558         */
2559        rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2560        /*
2561         * A pending lock might already be on the list, so
2562         * don't process it twice:
2563         */
2564        if (entry != pending)
2565            if (handle_futex_death((void __user *)entry + futex_offset,
2566                        curr, pi))
2567                return;
2568        if (rc)
2569            return;
2570        entry = next_entry;
2571        pi = next_pi;
2572        /*
2573         * Avoid excessively long or circular lists:
2574         */
2575        if (!--limit)
2576            break;
2577
2578        cond_resched();
2579    }
2580
2581    if (pending)
2582        handle_futex_death((void __user *)pending + futex_offset,
2583                   curr, pip);
2584}
2585
2586long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2587        u32 __user *uaddr2, u32 val2, u32 val3)
2588{
2589    int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK;
2590    unsigned int flags = 0;
2591
2592    if (!(op & FUTEX_PRIVATE_FLAG))
2593        flags |= FLAGS_SHARED;
2594
2595    if (op & FUTEX_CLOCK_REALTIME) {
2596        flags |= FLAGS_CLOCKRT;
2597        if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2598            return -ENOSYS;
2599    }
2600
2601    switch (cmd) {
2602    case FUTEX_WAIT:
2603        val3 = FUTEX_BITSET_MATCH_ANY;
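        /* fall through */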
2604    case FUTEX_WAIT_BITSET:
2605        ret = futex_wait(uaddr, flags, val, timeout, val3);
2606        break;
2607    case FUTEX_WAKE:
2608        val3 = FUTEX_BITSET_MATCH_ANY;
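        /* fall through */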
2609    case FUTEX_WAKE_BITSET:
2610        ret = futex_wake(uaddr, flags, val, val3);
2611        break;
2612    case FUTEX_REQUEUE:
2613        ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2614        break;
2615    case FUTEX_CMP_REQUEUE:
2616        ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2617        break;
2618    case FUTEX_WAKE_OP:
2619        ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2620        break;
2621    case FUTEX_LOCK_PI:
2622        if (futex_cmpxchg_enabled)
2623            ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
2624        break;
2625    case FUTEX_UNLOCK_PI:
2626        if (futex_cmpxchg_enabled)
2627            ret = futex_unlock_pi(uaddr, flags);
2628        break;
2629    case FUTEX_TRYLOCK_PI:
2630        if (futex_cmpxchg_enabled)
2631            ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
2632        break;
2633    case FUTEX_WAIT_REQUEUE_PI:
2634        val3 = FUTEX_BITSET_MATCH_ANY;
2635        ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2636                        uaddr2);
2637        break;
2638    case FUTEX_CMP_REQUEUE_PI:
2639        ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2640        break;
2641    default:
2642        ret = -ENOSYS;
2643    }
2644    return ret;
2645}
2646
2647
2648SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2649        struct timespec __user *, utime, u32 __user *, uaddr2,
2650        u32, val3)
2651{
2652    struct timespec ts;
2653    ktime_t t, *tp = NULL;
2654    u32 val2 = 0;
2655    int cmd = op & FUTEX_CMD_MASK;
2656
2657    if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2658              cmd == FUTEX_WAIT_BITSET ||
2659              cmd == FUTEX_WAIT_REQUEUE_PI)) {
2660        if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2661            return -EFAULT;
2662        if (!timespec_valid(&ts))
2663            return -EINVAL;
2664
2665        t = timespec_to_ktime(ts);
2666        if (cmd == FUTEX_WAIT)
2667            t = ktime_add_safe(ktime_get(), t);
2668        tp = &t;
2669    }
2670    /*
2671     * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2672     * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2673     */
2674    if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2675        cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2676        val2 = (u32) (unsigned long) utime;
2677
2678    return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2679}
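
/*
 * Illustrative call (a sketch; raw syscall(2) use is an assumption).
 * Note the asymmetry handled above: FUTEX_WAIT takes a relative
 * timespec, converted to an absolute expiry here, while
 * FUTEX_WAIT_BITSET and FUTEX_WAIT_REQUEUE_PI expect an absolute one:
 *
 *   struct timespec rel = { .tv_sec = 1, .tv_nsec = 0 };
 *   syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected, &rel,
 *           NULL, 0);
 *
 * For the requeue and wake-op commands the utime slot carries a plain
 * integer (val2), not a pointer, as the comment above notes.
 */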
2680
2681static int __init futex_init(void)
2682{
2683    u32 curval;
2684    int i;
2685
2686    /*
2687     * This will fail and we want it. Some arch implementations do
2688     * runtime detection of the futex_atomic_cmpxchg_inatomic()
2689     * functionality. We want to know that before we call in any
2690     * of the complex code paths. Also we want to prevent
2691     * registration of robust lists in that case. NULL is
2692     * guaranteed to fault and we get -EFAULT on functional
2693     * implementation, the non-functional ones will return
2694     * -ENOSYS.
2695     */
2696    if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
2697        futex_cmpxchg_enabled = 1;
2698
2699    for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2700        plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2701        spin_lock_init(&futex_queues[i].lock);
2702    }
2703
2704    return 0;
2705}
2706__initcall(futex_init);
2707
