kernel/futex.c

/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the waker can simply return. In order
 * for this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is
 * concurrently being checked by the waker, avoiding scenarios like the
 * following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *     lock(hash_bucket(futex));
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   mb(); (A) <-- paired with -.
 *                              |
 *   lock(hash_bucket(futex));  |
 *                              |
 *   uval = *futex;             |
 *                              |        *futex = newval;
 *                              |        sys_futex(WAKE, futex);
 *                              |          futex_wake(futex);
 *                              |
 *                              `------->  mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers in
 * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
 * futex type.
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *     X = Y = 0
 *
 *     w[X]=1          w[Y]=1
 *     MB              MB
 *     r[Y]=y          r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible
 * that the wait call can return an error, in which case we backtrack from
 * it in (b). Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock, and decrement them again after releasing it - the
 * code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done
 * for double_lock_hb() and double_unlock_hb(), respectively.
 */

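/*
 * A minimal sketch of the user space side of the protocol described
 * above, for illustration only (the sys_futex() wrapper is assumed,
 * not part of this file). The waker writes the new value *before*
 * waking, and the waiter passes in the value it last saw:
 *
 *     while (*futex == val)                        // waiter
 *             sys_futex(futex, FUTEX_WAIT, val, ...);
 *
 *     *futex = newval;                             // waker
 *     sys_futex(futex, FUTEX_WAKE, 1, ...);
 *
 * FUTEX_WAIT returns -EWOULDBLOCK if *futex no longer equals val by
 * the time the kernel re-reads it under the hash bucket lock, which
 * is exactly the "missed transition" case diagrammed above.
 */
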
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
    /*
     * list of 'owned' pi_state instances - these have to be
     * cleaned up in do_exit() if the task exits prematurely:
     */
    struct list_head list;

    /*
     * The PI object:
     */
    struct rt_mutex pi_mutex;

    struct task_struct *owner;
    atomic_t refcount;

    union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list: priority-sorted list of tasks waiting on this futex
 * @task: the task waiting on the futex
 * @lock_ptr: the hash bucket lock
 * @key: the key the futex is hashed on
 * @pi_state: optional priority inheritance state
 * @rt_waiter: rt_waiter storage for use with requeue_pi
 * @requeue_pi_key: the requeue_pi target futex key
 * @bitset: bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
    struct plist_node list;

    struct task_struct *task;
    spinlock_t *lock_ptr;
    union futex_key key;
    struct futex_pi_state *pi_state;
    struct rt_mutex_waiter *rt_waiter;
    union futex_key *requeue_pi_key;
    u32 bitset;
};

static const struct futex_q futex_q_init = {
    /* list gets initialized in queue_me() */
    .key = FUTEX_KEY_INIT,
    .bitset = FUTEX_BITSET_MATCH_ANY
};

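/*
 * For orientation, a rough sketch (illustrative, details and error
 * handling elided) of how a futex_q travels through the wait path
 * implemented further down in this file:
 *
 *     struct futex_q q = futex_q_init;
 *
 *     get_futex_key(uaddr, ..., &q.key, ...);
 *     hb = queue_lock(&q);        // hb_waiters_inc() + hb->lock, MB (A)
 *     // re-read *uaddr under the lock, compare with the expected value
 *     queue_me(&q, hb);           // enqueue on hb->chain, drop hb->lock
 *     schedule();                 // until wake_futex() NULLs q.lock_ptr
 *     unqueue_me(&q);             // only needed on timeout or signal
 */
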
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
    atomic_t waiters;
    spinlock_t lock;
    struct plist_head chain;
} ____cacheline_aligned_in_smp;

static unsigned long __read_mostly futex_hashsize;

static struct futex_hash_bucket *futex_queues;

static inline void futex_get_mm(union futex_key *key)
{
    atomic_inc(&key->private.mm->mm_count);
    /*
     * Ensure futex_get_mm() implies a full barrier such that
     * get_futex_key() implies a full barrier. This is relied upon
     * as full barrier (B), see the ordering comment above.
     */
    smp_mb__after_atomic_inc();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
    atomic_inc(&hb->waiters);
    /*
     * Full barrier (A), see the ordering comment above.
     */
    smp_mb__after_atomic_inc();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
    atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
    return atomic_read(&hb->waiters);
#else
    return 1;
#endif
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
    u32 hash = jhash2((u32 *)&key->both.word,
              (sizeof(key->both.word) + sizeof(key->both.ptr)) / 4,
              key->both.offset);
    return &futex_queues[hash & (futex_hashsize - 1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
    return (key1 && key2
        && key1->both.word == key2->both.word
        && key1->both.ptr == key2->both.ptr
        && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
    if (!key->both.ptr)
        return;

    switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
    case FUT_OFF_INODE:
        ihold(key->shared.inode); /* implies MB (B) */
        break;
    case FUT_OFF_MMSHARED:
        futex_get_mm(key); /* implies MB (B) */
        break;
    }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
    if (!key->both.ptr) {
        /* If we're here then we tried to put a key we failed to get */
        WARN_ON_ONCE(1);
        return;
    }

    switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
    case FUT_OFF_INODE:
        iput(key->shared.inode);
        break;
    case FUT_OFF_MMSHARED:
        mmdrop(key->private.mm);
        break;
    }
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 * @rw: mapping needs to be read/write (values: VERIFY_READ,
 * VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
    unsigned long address = (unsigned long)uaddr;
    struct mm_struct *mm = current->mm;
    struct page *page, *page_head;
    int err, ro = 0;

    /*
     * The futex address must be "naturally" aligned.
     */
    key->both.offset = address % PAGE_SIZE;
    if (unlikely((address % sizeof(u32)) != 0))
        return -EINVAL;
    address -= key->both.offset;

    if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
        return -EFAULT;

    /*
     * PROCESS_PRIVATE futexes are fast.
     * As the mm cannot disappear under us and the 'key' only needs
     * the virtual address, we don't even have to find the underlying vma.
     * Note: we do have to check that 'uaddr' is a valid user address,
     * but access_ok() should be faster than find_vma().
     */
    if (!fshared) {
        key->private.mm = mm;
        key->private.address = address;
        get_futex_key_refs(key); /* implies MB (B) */
        return 0;
    }

again:
    err = get_user_pages_fast(address, 1, 1, &page);
    /*
     * If write access is not required (e.g. FUTEX_WAIT), try
     * to get read-only access.
     */
    if (err == -EFAULT && rw == VERIFY_READ) {
        err = get_user_pages_fast(address, 1, 0, &page);
        ro = 1;
    }
    if (err < 0)
        return err;
    else
        err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    page_head = page;
    if (unlikely(PageTail(page))) {
        put_page(page);
        /* serialize against __split_huge_page_splitting() */
        local_irq_disable();
        if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
            page_head = compound_head(page);
            /*
             * page_head is a valid pointer but we must pin
             * it before taking the PG_lock and/or
             * PG_compound_lock. The moment we re-enable
             * irqs __split_huge_page_splitting() can
             * return and the head page can be freed from
             * under us. We can't take the PG_lock and/or
             * PG_compound_lock on a page that could be
             * freed from under us.
             */
            if (page != page_head) {
                get_page(page_head);
                put_page(page);
            }
            local_irq_enable();
        } else {
            local_irq_enable();
            goto again;
        }
    }
#else
    page_head = compound_head(page);
    if (page != page_head) {
        get_page(page_head);
        put_page(page);
    }
#endif

    lock_page(page_head);

    /*
     * If page_head->mapping is NULL, then it cannot be a PageAnon
     * page; but it might be the ZERO_PAGE or in the gate area or
     * in a special mapping (all cases which we are happy to fail);
     * or it may have been a good file page when get_user_pages_fast
     * found it, but truncated or holepunched or subjected to
     * invalidate_complete_page2 before we got the page lock (also
     * cases which we are happy to fail). And we hold a reference,
     * so refcount care in invalidate_complete_page's remove_mapping
     * prevents drop_caches from setting mapping to NULL beneath us.
     *
     * The case we do have to guard against is when memory pressure made
     * shmem_writepage move it from filecache to swapcache beneath us:
     * an unlikely race, but we do need to retry for page_head->mapping.
     */
    if (!page_head->mapping) {
        int shmem_swizzled = PageSwapCache(page_head);
        unlock_page(page_head);
        put_page(page_head);
        if (shmem_swizzled)
            goto again;
        return -EFAULT;
    }

    /*
     * Private mappings are handled in a simple way.
     *
     * NOTE: When userspace waits on a MAP_SHARED mapping, even if
     * it's a read-only handle, it's expected that futexes attach to
     * the object not the particular process.
     */
    if (PageAnon(page_head)) {
        /*
         * A RO anonymous page will never change and thus doesn't make
         * sense for futex operations.
         */
        if (ro) {
            err = -EFAULT;
            goto out;
        }

        key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
        key->private.mm = mm;
        key->private.address = address;
    } else {
        key->both.offset |= FUT_OFF_INODE; /* inode-based key */
        key->shared.inode = page_head->mapping->host;
        key->shared.pgoff = basepage_index(page);
    }

    get_futex_key_refs(key); /* implies MB (B) */

out:
    unlock_page(page_head);
    put_page(page_head);
    return err;
}

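/*
 * Summary of the key flavours produced above (illustrative):
 *
 *     PROCESS_PRIVATE:          { mm, address, offset }
 *     shared, anonymous page:   { mm, address, offset | FUT_OFF_MMSHARED }
 *     shared, file-backed page: { inode, pgoff, offset | FUT_OFF_INODE }
 *
 * Two threads in different processes waiting on the same MAP_SHARED
 * file page therefore end up with matching keys even though their
 * virtual addresses differ.
 */
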
static inline void put_futex_key(union futex_key *key)
{
    drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr: pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
    struct mm_struct *mm = current->mm;
    int ret;

    down_read(&mm->mmap_sem);
    ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
                   FAULT_FLAG_WRITE);
    up_read(&mm->mmap_sem);

    return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb: the hash bucket the futex_q's reside in
 * @key: the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
                    union futex_key *key)
{
    struct futex_q *this;

    plist_for_each_entry(this, &hb->chain, list) {
        if (match_futex(&this->key, key))
            return this;
    }
    return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
                      u32 uval, u32 newval)
{
    int ret;

    pagefault_disable();
    ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
    pagefault_enable();

    return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
    int ret;

    pagefault_disable();
    ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
    pagefault_enable();

    return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
    struct futex_pi_state *pi_state;

    if (likely(current->pi_state_cache))
        return 0;

    pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

    if (!pi_state)
        return -ENOMEM;

    INIT_LIST_HEAD(&pi_state->list);
    /* pi_mutex gets initialized later */
    pi_state->owner = NULL;
    atomic_set(&pi_state->refcount, 1);
    pi_state->key = FUTEX_KEY_INIT;

    current->pi_state_cache = pi_state;

    return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
    struct futex_pi_state *pi_state = current->pi_state_cache;

    WARN_ON(!pi_state);
    current->pi_state_cache = NULL;

    return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
    if (!atomic_dec_and_test(&pi_state->refcount))
        return;

    /*
     * If pi_state->owner is NULL, the owner is most probably dying
     * and has cleaned up the pi_state already
     */
    if (pi_state->owner) {
        raw_spin_lock_irq(&pi_state->owner->pi_lock);
        list_del_init(&pi_state->list);
        raw_spin_unlock_irq(&pi_state->owner->pi_lock);

        rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
    }

    if (current->pi_state_cache)
        kfree(pi_state);
    else {
        /*
         * pi_state->list is already empty.
         * clear pi_state->owner.
         * refcount is at 0 - put it back to 1.
         */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        current->pi_state_cache = pi_state;
    }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
    struct task_struct *p;

    rcu_read_lock();
    p = find_task_by_vpid(pid);
    if (p)
        get_task_struct(p);

    rcu_read_unlock();

    return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
    struct list_head *next, *head = &curr->pi_state_list;
    struct futex_pi_state *pi_state;
    struct futex_hash_bucket *hb;
    union futex_key key = FUTEX_KEY_INIT;

    if (!futex_cmpxchg_enabled)
        return;
    /*
     * We are a ZOMBIE and nobody can enqueue itself on
     * pi_state_list anymore, but we have to be careful
     * versus waiters unqueueing themselves:
     */
    raw_spin_lock_irq(&curr->pi_lock);
    while (!list_empty(head)) {

        next = head->next;
        pi_state = list_entry(next, struct futex_pi_state, list);
        key = pi_state->key;
        hb = hash_futex(&key);
        raw_spin_unlock_irq(&curr->pi_lock);

        spin_lock(&hb->lock);

        raw_spin_lock_irq(&curr->pi_lock);
        /*
         * We dropped the pi-lock, so re-check whether this
         * task still owns the PI-state:
         */
        if (head->next != next) {
            spin_unlock(&hb->lock);
            continue;
        }

        WARN_ON(pi_state->owner != curr);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        pi_state->owner = NULL;
        raw_spin_unlock_irq(&curr->pi_lock);

        rt_mutex_unlock(&pi_state->pi_mutex);

        spin_unlock(&hb->lock);

        raw_spin_lock_irq(&curr->pi_lock);
    }
    raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]  Indicates that the kernel can acquire the futex atomically. We
 *      came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]  Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]  Invalid. The waiter is queued on a non PI futex
 *
 * [4]  Valid state after exit_robust_list(), which sets the user space
 *      value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]  The user space value got manipulated between exit_robust_list()
 *      and exit_pi_state_list()
 *
 * [6]  Valid state after exit_pi_state_list() which sets the new owner in
 *      the pi_state but cannot access the user space value.
 *
 * [7]  pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]  Owner and user space value match
 *
 * [9]  There is no transient state which sets the user space TID to 0
 *      except exit_robust_list(), but this is indicated by the
 *      FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *      TID out of sync.
 */
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        union futex_key *key, struct futex_pi_state **ps)
{
    struct futex_pi_state *pi_state = NULL;
    struct futex_q *this, *next;
    struct task_struct *p;
    pid_t pid = uval & FUTEX_TID_MASK;

    plist_for_each_entry_safe(this, next, &hb->chain, list) {
        if (match_futex(&this->key, key)) {
            /*
             * Sanity check the waiter before increasing
             * the refcount and attaching to it.
             */
            pi_state = this->pi_state;
            /*
             * Userspace might have messed up non-PI and
             * PI futexes [3]
             */
            if (unlikely(!pi_state))
                return -EINVAL;

            WARN_ON(!atomic_read(&pi_state->refcount));

            /*
             * Handle the owner died case:
             */
            if (uval & FUTEX_OWNER_DIED) {
                /*
                 * exit_pi_state_list sets owner to NULL and
                 * wakes the topmost waiter. The task which
                 * acquires the pi_state->rt_mutex will fixup
                 * owner.
                 */
                if (!pi_state->owner) {
                    /*
                     * No pi state owner, but the user
                     * space TID is not 0. Inconsistent
                     * state. [5]
                     */
                    if (pid)
                        return -EINVAL;
                    /*
                     * Take a ref on the state and
                     * return. [4]
                     */
                    goto out_state;
                }

                /*
                 * If TID is 0, then either the dying owner
                 * has not yet executed exit_pi_state_list()
                 * or some waiter acquired the rtmutex in the
                 * pi state, but did not yet fixup the TID in
                 * user space.
                 *
                 * Take a ref on the state and return. [6]
                 */
                if (!pid)
                    goto out_state;
            } else {
                /*
                 * If the owner died bit is not set,
                 * then the pi_state must have an
                 * owner. [7]
                 */
                if (!pi_state->owner)
                    return -EINVAL;
            }

            /*
             * Bail out if user space manipulated the
             * futex value. If pi state exists then the
             * owner TID must be the same as the user
             * space TID. [9/10]
             */
            if (pid != task_pid_vnr(pi_state->owner))
                return -EINVAL;

        out_state:
            atomic_inc(&pi_state->refcount);
            *ps = pi_state;
            return 0;
        }
    }

    /*
     * We are the first waiter - try to look up the real owner and attach
     * the new pi_state to it, but bail out when TID = 0 [1]
     */
    if (!pid)
        return -ESRCH;
    p = futex_find_get_task(pid);
    if (!p)
        return -ESRCH;

    if (!p->mm) {
        put_task_struct(p);
        return -EPERM;
    }

    /*
     * We need to look at the task state flags to figure out
     * whether the task is exiting. To protect against the do_exit
     * change of the task flags, we do this protected by
     * p->pi_lock:
     */
    raw_spin_lock_irq(&p->pi_lock);
    if (unlikely(p->flags & PF_EXITING)) {
        /*
         * The task is on the way out. When PF_EXITPIDONE is
         * set, we know that the task has finished the
         * cleanup:
         */
        int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

        raw_spin_unlock_irq(&p->pi_lock);
        put_task_struct(p);
        return ret;
    }

    /*
     * No existing pi state. First waiter. [2]
     */
    pi_state = alloc_pi_state();

    /*
     * Initialize the pi_mutex in locked state and make 'p'
     * the owner of it:
     */
    rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

    /* Store the key for possible exit cleanups: */
    pi_state->key = *key;

    WARN_ON(!list_empty(&pi_state->list));
    list_add(&pi_state->list, &p->pi_state_list);
    pi_state->owner = p;
    raw_spin_unlock_irq(&p->pi_lock);

    put_task_struct(p);

    *ps = pi_state;

    return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr: the pi futex user address
 * @hb: the pi futex hash bucket
 * @key: the futex key associated with uaddr and hb
 * @ps: the pi_state pointer where we store the result of the
 * lookup
 * @task: the task to perform the atomic lock work for. This will
 * be "current" except in the case of requeue pi.
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 * 0 - ready to wait;
 * 1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                union futex_key *key,
                struct futex_pi_state **ps,
                struct task_struct *task, int set_waiters)
{
    int lock_taken, ret, force_take = 0;
    u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
    ret = lock_taken = 0;

    /*
     * To avoid races, we attempt to take the lock here again
     * (by doing a 0 -> TID atomic cmpxchg), while holding all
     * the locks. It will most likely not succeed.
     */
    newval = vpid;
    if (set_waiters)
        newval |= FUTEX_WAITERS;

    if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
        return -EFAULT;

    /*
     * Detect deadlocks.
     */
    if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
        return -EDEADLK;

    /*
     * Surprise - we got the lock, but we do not trust user space at all.
     */
    if (unlikely(!curval)) {
        /*
         * We verify whether there is kernel state for this
         * futex. If not, we can safely assume that the 0 ->
         * TID transition is correct. If state exists, we do
         * not bother to fixup the user space state as it was
         * corrupted already.
         */
        return futex_top_waiter(hb, key) ? -EINVAL : 1;
    }

    uval = curval;

    /*
     * Set the FUTEX_WAITERS flag, so the owner will know it has someone
     * to wake at the next unlock.
     */
    newval = curval | FUTEX_WAITERS;

    /*
     * Should we force take the futex? See below.
     */
    if (unlikely(force_take)) {
        /*
         * Keep the OWNER_DIED and the WAITERS bit and set the
         * new TID value.
         */
        newval = (curval & ~FUTEX_TID_MASK) | vpid;
        force_take = 0;
        lock_taken = 1;
    }

    if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
        return -EFAULT;
    if (unlikely(curval != uval))
        goto retry;

    /*
     * We took the lock due to a forced takeover.
     */
    if (unlikely(lock_taken))
        return 1;

    /*
     * We don't have the lock. Look up the PI state (or create it if
     * we are the first waiter):
     */
    ret = lookup_pi_state(uval, hb, key, ps);

    if (unlikely(ret)) {
        switch (ret) {
        case -ESRCH:
            /*
             * We failed to find an owner for this
             * futex. So we have no pi_state to block
             * on. This can happen in two cases:
             *
             * 1) The owner died
             * 2) A stale FUTEX_WAITERS bit
             *
             * Re-read the futex value.
             */
            if (get_futex_value_locked(&curval, uaddr))
                return -EFAULT;

            /*
             * If the owner died or we have a stale
             * WAITERS bit the owner TID in the user space
             * futex is 0.
             */
            if (!(curval & FUTEX_TID_MASK)) {
                force_take = 1;
                goto retry;
            }
        default:
            break;
        }
    }

    return ret;
}

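/*
 * Illustrative only: the user space fast path that the function above
 * backs up (sys_futex wrapper hypothetical). A PI lock attempt first
 * tries the 0 -> TID cmpxchg in user space and only enters the kernel
 * on contention:
 *
 *     if (cmpxchg(&futex, 0, gettid()) != 0)
 *             sys_futex(&futex, FUTEX_LOCK_PI, 0, timeout, ...);
 */
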
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q: The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
    struct futex_hash_bucket *hb;

    if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
        || WARN_ON(plist_node_empty(&q->list)))
        return;

    hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
    plist_del(&q->list, &hb->chain);
    hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
    struct task_struct *p = q->task;

    if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
        return;

    /*
     * We set q->lock_ptr = NULL _before_ we wake up the task. If
     * a non-futex wake up happens on another CPU then the task
     * might exit and p would dereference a non-existing task
     * struct. Prevent this by holding a reference on p across the
     * wake up.
     */
    get_task_struct(p);

    __unqueue_futex(q);
    /*
     * The waiting task can free the futex_q as soon as
     * q->lock_ptr = NULL is written, without taking any locks. A
     * memory barrier is required here to prevent the following
     * store to lock_ptr from getting ahead of the plist_del.
     */
    smp_wmb();
    q->lock_ptr = NULL;

    wake_up_state(p, TASK_NORMAL);
    put_task_struct(p);
}

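/*
 * Note how this realizes the futex_q "woken state" described near the
 * top of this file: plist_del() makes plist_node_empty(&q->list) true
 * first, then q->lock_ptr is set to NULL, with smp_wmb() keeping that
 * order visible to the waiter side, which re-checks q->lock_ptr under
 * the lock before touching the queue.
 */
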
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
    struct task_struct *new_owner;
    struct futex_pi_state *pi_state = this->pi_state;
    u32 uninitialized_var(curval), newval;
    int ret = 0;

    if (!pi_state)
        return -EINVAL;

    /*
     * If current does not own the pi_state then the futex is
     * inconsistent and user space fiddled with the futex value.
     */
    if (pi_state->owner != current)
        return -EINVAL;

    raw_spin_lock(&pi_state->pi_mutex.wait_lock);
    new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

    /*
     * It is possible that the next waiter (the one that brought
     * this owner to the kernel) timed out and is no longer
     * waiting on the lock.
     */
    if (!new_owner)
        new_owner = this->task;

    /*
     * We pass it to the next owner. The WAITERS bit is always
     * kept enabled while there is PI state around. We cleanup the
     * owner died bit, because we are the owner.
     */
    newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

    if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
        ret = -EFAULT;
    else if (curval != uval)
        ret = -EINVAL;
    if (ret) {
        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
        return ret;
    }

    raw_spin_lock_irq(&pi_state->owner->pi_lock);
    WARN_ON(list_empty(&pi_state->list));
    list_del_init(&pi_state->list);
    raw_spin_unlock_irq(&pi_state->owner->pi_lock);

    raw_spin_lock_irq(&new_owner->pi_lock);
    WARN_ON(!list_empty(&pi_state->list));
    list_add(&pi_state->list, &new_owner->pi_state_list);
    pi_state->owner = new_owner;
    raw_spin_unlock_irq(&new_owner->pi_lock);

    raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
    rt_mutex_unlock(&pi_state->pi_mutex);

    return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
    u32 uninitialized_var(oldval);

    /*
     * There is no waiter, so we unlock the futex. The owner died
     * bit need not be preserved here. We are the owner:
     */
    if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
        return -EFAULT;
    if (oldval != uval)
        return -EAGAIN;

    return 0;
}

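/*
 * Illustrative only (hypothetical wrapper): the matching user space
 * unlock fast path. The TID -> 0 cmpxchg succeeds only while no
 * FUTEX_WAITERS bit is set; otherwise the kernel must arbitrate via
 * the functions above:
 *
 *     if (cmpxchg(&futex, gettid(), 0) != gettid())
 *             sys_futex(&futex, FUTEX_UNLOCK_PI, 0, ...);
 */
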
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
    if (hb1 <= hb2) {
        spin_lock(&hb1->lock);
        if (hb1 < hb2)
            spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
    } else { /* hb1 > hb2 */
        spin_lock(&hb2->lock);
        spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
    }
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
    spin_unlock(&hb1->lock);
    if (hb1 != hb2)
        spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
    struct futex_hash_bucket *hb;
    struct futex_q *this, *next;
    union futex_key key = FUTEX_KEY_INIT;
    int ret;

    if (!bitset)
        return -EINVAL;

    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
    if (unlikely(ret != 0))
        goto out;

    hb = hash_futex(&key);

    /* Make sure we really have tasks to wakeup */
    if (!hb_waiters_pending(hb))
        goto out_put_key;

    spin_lock(&hb->lock);

    plist_for_each_entry_safe(this, next, &hb->chain, list) {
        if (match_futex(&this->key, &key)) {
            if (this->pi_state || this->rt_waiter) {
                ret = -EINVAL;
                break;
            }

            /* Check if one of the bits is set in both bitsets */
            if (!(this->bitset & bitset))
                continue;

            wake_futex(this);
            if (++ret >= nr_wake)
                break;
        }
    }

    spin_unlock(&hb->lock);
out_put_key:
    put_futex_key(&key);
out:
    return ret;
}

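/*
 * Illustrative user space counterpart (hypothetical sys_futex wrapper):
 *
 *     sys_futex(&futex, FUTEX_WAKE, 1, ...);        // wake one waiter
 *     sys_futex(&futex, FUTEX_WAKE, INT_MAX, ...);  // wake all waiters
 *     sys_futex(&futex, FUTEX_WAKE_BITSET, 1, ..., 0x1);
 *                       // wake only waiters whose FUTEX_WAIT_BITSET
 *                       // bitset intersects 0x1
 */
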
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
          int nr_wake, int nr_wake2, int op)
{
    union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
    struct futex_hash_bucket *hb1, *hb2;
    struct futex_q *this, *next;
    int ret, op_ret;

retry:
    ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
    if (unlikely(ret != 0))
        goto out;
    ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
    if (unlikely(ret != 0))
        goto out_put_key1;

    hb1 = hash_futex(&key1);
    hb2 = hash_futex(&key2);

retry_private:
    double_lock_hb(hb1, hb2);
    op_ret = futex_atomic_op_inuser(op, uaddr2);
    if (unlikely(op_ret < 0)) {

        double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
        /*
         * we don't get EFAULT from MMU faults if we don't have an MMU,
         * but we might get them from range checking
         */
        ret = op_ret;
        goto out_put_keys;
#endif

        if (unlikely(op_ret != -EFAULT)) {
            ret = op_ret;
            goto out_put_keys;
        }

        ret = fault_in_user_writeable(uaddr2);
        if (ret)
            goto out_put_keys;

        if (!(flags & FLAGS_SHARED))
            goto retry_private;

        put_futex_key(&key2);
        put_futex_key(&key1);
        goto retry;
    }

    plist_for_each_entry_safe(this, next, &hb1->chain, list) {
        if (match_futex(&this->key, &key1)) {
            if (this->pi_state || this->rt_waiter) {
                ret = -EINVAL;
                goto out_unlock;
            }
            wake_futex(this);
            if (++ret >= nr_wake)
                break;
        }
    }

    if (op_ret > 0) {
        op_ret = 0;
        plist_for_each_entry_safe(this, next, &hb2->chain, list) {
            if (match_futex(&this->key, &key2)) {
                if (this->pi_state || this->rt_waiter) {
                    ret = -EINVAL;
                    goto out_unlock;
                }
                wake_futex(this);
                if (++op_ret >= nr_wake2)
                    break;
            }
        }
        ret += op_ret;
    }

out_unlock:
    double_unlock_hb(hb1, hb2);
out_put_keys:
    put_futex_key(&key2);
out_put_key1:
    put_futex_key(&key1);
out:
    return ret;
}

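/*
 * Illustrative (hypothetical sys_futex wrapper): FUTEX_WAKE_OP
 * atomically applies an encoded operation to *uaddr2, wakes up to
 * nr_wake waiters on uaddr1 and, if the encoded comparison against the
 * old *uaddr2 value succeeds, up to nr_wake2 waiters on uaddr2. The op
 * below sets *uaddr2 to 0 and compares the old *uaddr2 against > 0;
 * nr_wake2 rides in the timeout argument slot:
 *
 *     sys_futex(uaddr1, FUTEX_WAKE_OP, 1, (void *)1UL,
 *               uaddr2, FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 0));
 */
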
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q: the futex_q to requeue
 * @hb1: the source hash_bucket
 * @hb2: the target hash_bucket
 * @key2: the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
           struct futex_hash_bucket *hb2, union futex_key *key2)
{

    /*
     * If key1 and key2 hash to the same bucket, no need to
     * requeue.
     */
    if (likely(&hb1->chain != &hb2->chain)) {
        plist_del(&q->list, &hb1->chain);
        hb_waiters_dec(hb1);
        plist_add(&q->list, &hb2->chain);
        hb_waiters_inc(hb2);
        q->lock_ptr = &hb2->lock;
    }
    get_futex_key_refs(key2);
    q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q: the futex_q
 * @key: the key of the requeue target futex
 * @hb: the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
               struct futex_hash_bucket *hb)
{
    get_futex_key_refs(key);
    q->key = *key;

    __unqueue_futex(q);

    WARN_ON(!q->rt_waiter);
    q->rt_waiter = NULL;

    q->lock_ptr = &hb->lock;

    wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex: the user address of the to futex
 * @hb1: the from futex hash bucket, must be locked by the caller
 * @hb2: the to futex hash bucket, must be locked by the caller
 * @key1: the from futex key
 * @key2: the to futex key
 * @ps: address to store the pi_state pointer
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 * 0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
                 struct futex_hash_bucket *hb1,
                 struct futex_hash_bucket *hb2,
                 union futex_key *key1, union futex_key *key2,
                 struct futex_pi_state **ps, int set_waiters)
{
    struct futex_q *top_waiter = NULL;
    u32 curval;
    int ret, vpid;

    if (get_futex_value_locked(&curval, pifutex))
        return -EFAULT;

    /*
     * Find the top_waiter and determine if there are additional waiters.
     * If the caller intends to requeue more than 1 waiter to pifutex,
     * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
     * as we have means to handle the possible fault. If not, don't set
     * the bit unnecessarily as it will force the subsequent unlock to
     * enter the kernel.
     */
    top_waiter = futex_top_waiter(hb1, key1);

    /* There are no waiters, nothing for us to do. */
    if (!top_waiter)
        return 0;

    /* Ensure we requeue to the expected futex. */
    if (!match_futex(top_waiter->requeue_pi_key, key2))
        return -EINVAL;

    /*
     * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
     * the contended case or if set_waiters is 1. The pi_state is returned
     * in ps in contended cases.
     */
    vpid = task_pid_vnr(top_waiter->task);
    ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                   set_waiters);
    if (ret == 1) {
        requeue_pi_wake_futex(top_waiter, key2, hb2);
        return vpid;
    }
    return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1: source futex user address
 * @flags: futex flags (FLAGS_SHARED, etc.)
 * @uaddr2: target futex user address
 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 * @cmpval: @uaddr1 expected value (or %NULL)
 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 * pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
 * <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
             u32 __user *uaddr2, int nr_wake, int nr_requeue,
             u32 *cmpval, int requeue_pi)
{
    union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
    int drop_count = 0, task_count = 0, ret;
    struct futex_pi_state *pi_state = NULL;
    struct futex_hash_bucket *hb1, *hb2;
    struct futex_q *this, *next;

    if (requeue_pi) {
        /*
         * Requeue PI only works on two distinct uaddrs. This
         * check is only valid for private futexes. See below.
         */
        if (uaddr1 == uaddr2)
            return -EINVAL;

        /*
         * requeue_pi requires a pi_state, try to allocate it now
         * without any locks in case it fails.
         */
        if (refill_pi_state_cache())
            return -ENOMEM;
        /*
         * requeue_pi must wake as many tasks as it can, up to nr_wake
         * + nr_requeue, since it acquires the rt_mutex prior to
         * returning to userspace, so as to not leave the rt_mutex with
         * waiters and no owner. However, second and third wake-ups
         * cannot be predicted as they involve race conditions with the
         * first wake and a fault while looking up the pi_state. Both
         * pthread_cond_signal() and pthread_cond_broadcast() should
         * use nr_wake=1.
         */
        if (nr_wake != 1)
            return -EINVAL;
    }

retry:
    if (pi_state != NULL) {
        /*
         * We will have to lookup the pi_state again, so free this one
         * to keep the accounting correct.
         */
        free_pi_state(pi_state);
        pi_state = NULL;
    }

    ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
    if (unlikely(ret != 0))
        goto out;
    ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
                requeue_pi ? VERIFY_WRITE : VERIFY_READ);
    if (unlikely(ret != 0))
        goto out_put_key1;

    /*
     * The check above which compares uaddrs is not sufficient for
     * shared futexes. We need to compare the keys:
     */
    if (requeue_pi && match_futex(&key1, &key2)) {
        ret = -EINVAL;
        goto out_put_keys;
    }

    hb1 = hash_futex(&key1);
    hb2 = hash_futex(&key2);

retry_private:
    hb_waiters_inc(hb2);
    double_lock_hb(hb1, hb2);

    if (likely(cmpval != NULL)) {
        u32 curval;

        ret = get_futex_value_locked(&curval, uaddr1);

        if (unlikely(ret)) {
            double_unlock_hb(hb1, hb2);
            hb_waiters_dec(hb2);

            ret = get_user(curval, uaddr1);
            if (ret)
                goto out_put_keys;

            if (!(flags & FLAGS_SHARED))
                goto retry_private;

            put_futex_key(&key2);
            put_futex_key(&key1);
            goto retry;
        }
        if (curval != *cmpval) {
            ret = -EAGAIN;
            goto out_unlock;
        }
    }

    if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
        /*
         * Attempt to acquire uaddr2 and wake the top waiter. If we
         * intend to requeue waiters, force setting the FUTEX_WAITERS
         * bit. We force this here where we are able to easily handle
         * faults rather than in the requeue loop below.
         */
        ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
                         &key2, &pi_state, nr_requeue);

        /*
         * At this point the top_waiter has either taken uaddr2 or is
         * waiting on it. If the former, then the pi_state will not
         * exist yet, look it up one more time to ensure we have a
         * reference to it. If the lock was taken, ret contains the
         * vpid of the top waiter task.
         */
        if (ret > 0) {
            WARN_ON(pi_state);
            drop_count++;
            task_count++;
            /*
             * If we acquired the lock, then the user
             * space value of uaddr2 should be vpid. It
             * cannot be changed by the top waiter as it
             * is blocked on hb2 lock if it tries to do
             * so. If something fiddled with it behind our
             * back the pi state lookup might unearth
             * it. So we rather use the known value than
             * rereading and handing potential crap to
             * lookup_pi_state.
             */
            ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
        }

        switch (ret) {
        case 0:
            break;
        case -EFAULT:
            double_unlock_hb(hb1, hb2);
            hb_waiters_dec(hb2);
            put_futex_key(&key2);
            put_futex_key(&key1);
            ret = fault_in_user_writeable(uaddr2);
            if (!ret)
                goto retry;
            goto out;
        case -EAGAIN:
            /* The owner was exiting, try again. */
            double_unlock_hb(hb1, hb2);
            hb_waiters_dec(hb2);
            put_futex_key(&key2);
            put_futex_key(&key1);
            cond_resched();
            goto retry;
        default:
            goto out_unlock;
        }
    }

1674    plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1675        if (task_count - nr_wake >= nr_requeue)
1676            break;
1677
1678        if (!match_futex(&this->key, &key1))
1679            continue;
1680
1681        /*
1682         * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1683         * be paired with each other and no other futex ops.
1684         *
1685         * We should never be requeueing a futex_q with a pi_state,
1686         * which is awaiting a futex_unlock_pi().
1687         */
1688        if ((requeue_pi && !this->rt_waiter) ||
1689            (!requeue_pi && this->rt_waiter) ||
1690            this->pi_state) {
1691            ret = -EINVAL;
1692            break;
1693        }
1694
1695        /*
1696         * Wake nr_wake waiters. For requeue_pi, if we acquired the
1697         * lock, we already woke the top_waiter. If not, it will be
1698         * woken by futex_unlock_pi().
1699         */
1700        if (++task_count <= nr_wake && !requeue_pi) {
1701            wake_futex(this);
1702            continue;
1703        }
1704
1705        /* Ensure we requeue to the expected futex for requeue_pi. */
1706        if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1707            ret = -EINVAL;
1708            break;
1709        }
1710
1711        /*
1712         * Requeue nr_requeue waiters and possibly one more in the case
1713         * of requeue_pi if we couldn't acquire the lock atomically.
1714         */
1715        if (requeue_pi) {
1716            /* Prepare the waiter to take the rt_mutex. */
1717            atomic_inc(&pi_state->refcount);
1718            this->pi_state = pi_state;
1719            ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1720                            this->rt_waiter,
1721                            this->task, 1);
1722            if (ret == 1) {
1723                /* We got the lock. */
1724                requeue_pi_wake_futex(this, &key2, hb2);
1725                drop_count++;
1726                continue;
1727            } else if (ret) {
1728                /* -EDEADLK */
1729                this->pi_state = NULL;
1730                free_pi_state(pi_state);
1731                goto out_unlock;
1732            }
1733        }
1734        requeue_futex(this, hb1, hb2, &key2);
1735        drop_count++;
1736    }
1737
1738out_unlock:
1739    double_unlock_hb(hb1, hb2);
1740    hb_waiters_dec(hb2);
1741
1742    /*
1743     * drop_futex_key_refs() must be called outside the spinlocks. During
1744     * the requeue we moved futex_q's from the hash bucket at key1 to the
1745     * one at key2 and updated their key pointer. We no longer need to
1746     * hold the references to key1.
1747     */
1748    while (--drop_count >= 0)
1749        drop_futex_key_refs(&key1);
1750
1751out_put_keys:
1752    put_futex_key(&key2);
1753out_put_key1:
1754    put_futex_key(&key1);
1755out:
1756    if (pi_state != NULL)
1757        free_pi_state(pi_state);
1758    return ret ? ret : task_count;
1759}
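
/*
 * Example (user-space sketch, not part of this file): a condvar-style
 * broadcast served by futex_requeue() above. It wakes one waiter and
 * requeues the rest onto the futex backing the mutex, avoiding a
 * thundering herd. glibc exports no futex() wrapper, so the raw syscall
 * is used; cond, mutex and oldval are illustrative names. Note that the
 * timeout argument slot carries nr_requeue for this command:
 *
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE, 1,
 *		(struct timespec *)(unsigned long)INT_MAX, &mutex, oldval);
 *
 * If *cond no longer equals oldval the kernel returns -EAGAIN and the
 * caller rereads the condvar state and retries, which is exactly the
 * val3 check performed by futex_requeue().
 */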
1760
1761/* The key must already be stored in q->key. */
1762static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1763    __acquires(&hb->lock)
1764{
1765    struct futex_hash_bucket *hb;
1766
1767    hb = hash_futex(&q->key);
1768
1769    /*
1770     * Increment the counter before taking the lock so that
1771     * a potential waker won't miss a to-be-slept task that is
1772     * waiting for the spinlock. This is safe as all queue_lock()
1773     * users end up calling queue_me(). Similarly, for housekeeping,
1774     * decrement the counter at queue_unlock() when some error has
1775     * occurred and we don't end up adding the task to the list.
1776     */
1777    hb_waiters_inc(hb);
1778
1779    q->lock_ptr = &hb->lock;
1780
1781    spin_lock(&hb->lock); /* implies MB (A) */
1782    return hb;
1783}
1784
1785static inline void
1786queue_unlock(struct futex_hash_bucket *hb)
1787    __releases(&hb->lock)
1788{
1789    spin_unlock(&hb->lock);
1790    hb_waiters_dec(hb);
1791}
1792
1793/**
1794 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1795 * @q: The futex_q to enqueue
1796 * @hb: The destination hash bucket
1797 *
1798 * The hb->lock must be held by the caller, and is released here. A call to
1799 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1800 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1801 * or nothing if the unqueue is done as part of the wake process and the unqueue
1802 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1803 * an example).
1804 */
1805static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1806    __releases(&hb->lock)
1807{
1808    int prio;
1809
1810    /*
1811     * The priority used to register this element is
1812     * - either the real thread-priority for the real-time threads
1813     * (i.e. threads with a priority lower than MAX_RT_PRIO)
1814     * - or MAX_RT_PRIO for non-RT threads.
1815     * Thus, all RT-threads are woken first in priority order, and
1816     * the others are woken last, in FIFO order.
1817     * the others are woken last, in FIFO order.
     */
1818    prio = min(current->normal_prio, MAX_RT_PRIO);
1819
1820    plist_node_init(&q->list, prio);
1821    plist_add(&q->list, &hb->chain);
1822    q->task = current;
1823    spin_unlock(&hb->lock);
1824}
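
/*
 * Worked example of the priority mapping in queue_me() above (lower
 * kernel prio value means more important): a SCHED_FIFO task with
 * rt_priority 50 has normal_prio 49, so it is enqueued at 49 and sorts
 * ahead of every SCHED_NORMAL task, all of which are clamped to
 * MAX_RT_PRIO (100) and hence queue behind the RT tasks in FIFO order
 * regardless of their nice level.
 */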
1825
1826/**
1827 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1828 * @q: The futex_q to unqueue
1829 *
1830 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1831 * be paired with exactly one earlier call to queue_me().
1832 *
1833 * Return:
1834 * 1 - if the futex_q was still queued (and we unqueued it);
1835 * 0 - if the futex_q was already removed by the waking thread
1836 */
1837static int unqueue_me(struct futex_q *q)
1838{
1839    spinlock_t *lock_ptr;
1840    int ret = 0;
1841
1842    /* In the common case we don't take the spinlock, which is nice. */
1843retry:
1844    lock_ptr = q->lock_ptr;
1845    barrier();
1846    if (lock_ptr != NULL) {
1847        spin_lock(lock_ptr);
1848        /*
1849         * q->lock_ptr can change between reading it and
1850         * spin_lock(), causing us to take the wrong lock. This
1851         * corrects the race condition.
1852         *
1853         * Reasoning goes like this: if we have the wrong lock,
1854         * q->lock_ptr must have changed (maybe several times)
1855         * between reading it and the spin_lock(). It can
1856         * change again after the spin_lock() but only if it was
1857         * already changed before the spin_lock(). It cannot,
1858         * however, change back to the original value. Therefore
1859         * we can detect whether we acquired the correct lock.
1860         */
1861        if (unlikely(lock_ptr != q->lock_ptr)) {
1862            spin_unlock(lock_ptr);
1863            goto retry;
1864        }
1865        __unqueue_futex(q);
1866
1867        BUG_ON(q->pi_state);
1868
1869        spin_unlock(lock_ptr);
1870        ret = 1;
1871    }
1872
1873    drop_futex_key_refs(&q->key);
1874    return ret;
1875}
1876
1877/*
1878 * PI futexes cannot be requeued and must remove themselves from the
1879 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1880 * and dropped here.
1881 */
1882static void unqueue_me_pi(struct futex_q *q)
1883    __releases(q->lock_ptr)
1884{
1885    __unqueue_futex(q);
1886
1887    BUG_ON(!q->pi_state);
1888    free_pi_state(q->pi_state);
1889    q->pi_state = NULL;
1890
1891    spin_unlock(q->lock_ptr);
1892}
1893
1894/*
1895 * Fixup the pi_state owner with the new owner.
1896 *
1897 * Must be called with the hash bucket lock held and mm->sem held for
1898 * non-private futexes.
1899 */
1900static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1901                struct task_struct *newowner)
1902{
1903    u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1904    struct futex_pi_state *pi_state = q->pi_state;
1905    struct task_struct *oldowner = pi_state->owner;
1906    u32 uval, uninitialized_var(curval), newval;
1907    int ret;
1908
1909    /* Owner died? */
1910    if (!pi_state->owner)
1911        newtid |= FUTEX_OWNER_DIED;
1912
1913    /*
1914     * We are here either because we stole the rtmutex from the
1915     * previous highest priority waiter or we are the highest priority
1916     * waiter but failed to get the rtmutex the first time.
1917     * We have to replace the newowner TID in the user space variable.
1918     * This must be atomic as we have to preserve the owner died bit here.
1919     *
1920     * Note: We write the user space value _before_ changing the pi_state
1921     * because we can fault here. Imagine swapped out pages or a fork
1922     * that marked all the anonymous memory readonly for cow.
1923     *
1924     * Modifying pi_state _before_ the user space value would
1925     * leave the pi_state in an inconsistent state when we fault
1926     * here, because we need to drop the hash bucket lock to
1927     * handle the fault. This might be observed in the PID check
1928     * in lookup_pi_state.
1929     */
1930retry:
1931    if (get_futex_value_locked(&uval, uaddr))
1932        goto handle_fault;
1933
1934    while (1) {
1935        newval = (uval & FUTEX_OWNER_DIED) | newtid;
1936
1937        if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1938            goto handle_fault;
1939        if (curval == uval)
1940            break;
1941        uval = curval;
1942    }
1943
1944    /*
1945     * We fixed up user space. Now we need to fix the pi_state
1946     * itself.
1947     */
1948    if (pi_state->owner != NULL) {
1949        raw_spin_lock_irq(&pi_state->owner->pi_lock);
1950        WARN_ON(list_empty(&pi_state->list));
1951        list_del_init(&pi_state->list);
1952        raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1953    }
1954
1955    pi_state->owner = newowner;
1956
1957    raw_spin_lock_irq(&newowner->pi_lock);
1958    WARN_ON(!list_empty(&pi_state->list));
1959    list_add(&pi_state->list, &newowner->pi_state_list);
1960    raw_spin_unlock_irq(&newowner->pi_lock);
1961    return 0;
1962
1963    /*
1964     * To handle the page fault we need to drop the hash bucket
1965     * lock here. That gives the other task (either the highest priority
1966     * waiter itself or the task which stole the rtmutex) the
1967     * chance to try the fixup of the pi_state. So once we are
1968     * back from handling the fault we need to check the pi_state
1969     * after reacquiring the hash bucket lock and before trying to
1970     * do another fixup. When the fixup has been done already we
1971     * simply return.
1972     */
1973handle_fault:
1974    spin_unlock(q->lock_ptr);
1975
1976    ret = fault_in_user_writeable(uaddr);
1977
1978    spin_lock(q->lock_ptr);
1979
1980    /*
1981     * Check if someone else fixed it for us:
1982     */
1983    if (pi_state->owner != oldowner)
1984        return 0;
1985
1986    if (ret)
1987        return ret;
1988
1989    goto retry;
1990}
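
/*
 * For reference, the layout of the user space PI futex word fixed up
 * above (constants from <linux/futex.h>):
 *
 *	bit 31      FUTEX_WAITERS    (0x80000000) - kernel-side waiters exist
 *	bit 30      FUTEX_OWNER_DIED (0x40000000) - previous owner exited
 *	bits 29..0  FUTEX_TID_MASK   (0x3fffffff) - TID of the owner
 *
 * so the newval computed in the loop preserves the owner-died bit while
 * installing the new owner's TID with FUTEX_WAITERS set.
 */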
1991
1992static long futex_wait_restart(struct restart_block *restart);
1993
1994/**
1995 * fixup_owner() - Post lock pi_state and corner case management
1996 * @uaddr: user address of the futex
1997 * @q: futex_q (contains pi_state and access to the rt_mutex)
1998 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
1999 *
2000 * After attempting to lock an rt_mutex, this function is called to cleanup
2001 * the pi_state owner as well as handle race conditions that may allow us to
2002 * acquire the lock. Must be called with the hb lock held.
2003 *
2004 * Return:
2005 * 1 - success, lock taken;
2006 * 0 - success, lock not taken;
2007 * <0 - on error (-EFAULT)
2008 */
2009static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2010{
2011    struct task_struct *owner;
2012    int ret = 0;
2013
2014    if (locked) {
2015        /*
2016         * Got the lock. We might not be the anticipated owner if we
2017         * did a lock-steal - fix up the PI-state in that case:
2018         */
2019        if (q->pi_state->owner != current)
2020            ret = fixup_pi_state_owner(uaddr, q, current);
2021        goto out;
2022    }
2023
2024    /*
2025     * Catch the rare case, where the lock was released when we were on the
2026     * way back before we locked the hash bucket.
2027     */
2028    if (q->pi_state->owner == current) {
2029        /*
2030         * Try to get the rt_mutex now. This might fail as some other
2031         * task acquired the rt_mutex after we removed ourself from the
2032         * rt_mutex waiters list.
2033         */
2034        if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2035            locked = 1;
2036            goto out;
2037        }
2038
2039        /*
2040         * pi_state is incorrect, some other task did a lock steal and
2041         * we returned due to timeout or signal without taking the
2042         * rt_mutex. Too late.
2043         */
2044        raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
2045        owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2046        if (!owner)
2047            owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2048        raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
2049        ret = fixup_pi_state_owner(uaddr, q, owner);
2050        goto out;
2051    }
2052
2053    /*
2054     * Paranoia check. If we did not take the lock, then we should not be
2055     * the owner of the rt_mutex.
2056     */
2057    if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2058        printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2059                "pi-state %p\n", ret,
2060                q->pi_state->pi_mutex.owner,
2061                q->pi_state->owner);
2062
2063out:
2064    return ret ? ret : locked;
2065}
2066
2067/**
2068 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2069 * @hb: the futex hash bucket, must be locked by the caller
2070 * @q: the futex_q to queue up on
2071 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2072 */
2073static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2074                struct hrtimer_sleeper *timeout)
2075{
2076    /*
2077     * The task state is guaranteed to be set before another task can
2078     * wake it. set_current_state() is implemented using set_mb() and
2079     * queue_me() calls spin_unlock() upon completion, both serializing
2080     * access to the hash list and forcing another memory barrier.
2081     */
2082    set_current_state(TASK_INTERRUPTIBLE);
2083    queue_me(q, hb);
2084
2085    /* Arm the timer */
2086    if (timeout) {
2087        hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2088        if (!hrtimer_active(&timeout->timer))
2089            timeout->task = NULL;
2090    }
2091
2092    /*
2093     * If we have been removed from the hash list, then another task
2094     * has tried to wake us, and we can skip the call to schedule().
2095     */
2096    if (likely(!plist_node_empty(&q->list))) {
2097        /*
2098         * If the timer has already expired, current will already be
2099         * flagged for rescheduling. Only call schedule if there
2100         * is no timeout, or if it has yet to expire.
2101         */
2102        if (!timeout || timeout->task)
2103            freezable_schedule();
2104    }
2105    __set_current_state(TASK_RUNNING);
2106}
2107
2108/**
2109 * futex_wait_setup() - Prepare to wait on a futex
2110 * @uaddr: the futex userspace address
2111 * @val: the expected value
2112 * @flags: futex flags (FLAGS_SHARED, etc.)
2113 * @q: the associated futex_q
2114 * @hb: storage for hash_bucket pointer to be returned to caller
2115 *
2116 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2117 * compare it with the expected value. Handle atomic faults internally.
2118 * Return with the hb lock held and a q.key reference on success, and unlocked
2119 * with no q.key reference on failure.
2120 *
2121 * Return:
2122 * 0 - uaddr contains val and hb has been locked;
2123 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2124 */
2125static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2126               struct futex_q *q, struct futex_hash_bucket **hb)
2127{
2128    u32 uval;
2129    int ret;
2130
2131    /*
2132     * Access the page AFTER the hash-bucket is locked.
2133     * Order is important:
2134     *
2135     * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2136     * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2137     *
2138     * The basic logical guarantee of a futex is that it blocks ONLY
2139     * if cond(var) is known to be true at the time of blocking, for
2140     * any cond. If we locked the hash-bucket after testing *uaddr, that
2141     * would open a race condition where we could block indefinitely with
2142     * cond(var) false, which would violate the guarantee.
2143     *
2144     * On the other hand, we insert q and release the hash-bucket only
2145     * after testing *uaddr. This guarantees that futex_wait() will NOT
2146     * absorb a wakeup if *uaddr does not match the desired value
2147     * while the syscall executes.
2148     */
2149retry:
2150    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2151    if (unlikely(ret != 0))
2152        return ret;
2153
2154retry_private:
2155    *hb = queue_lock(q);
2156
2157    ret = get_futex_value_locked(&uval, uaddr);
2158
2159    if (ret) {
2160        queue_unlock(*hb);
2161
2162        ret = get_user(uval, uaddr);
2163        if (ret)
2164            goto out;
2165
2166        if (!(flags & FLAGS_SHARED))
2167            goto retry_private;
2168
2169        put_futex_key(&q->key);
2170        goto retry;
2171    }
2172
2173    if (uval != val) {
2174        queue_unlock(*hb);
2175        ret = -EWOULDBLOCK;
2176    }
2177
2178out:
2179    if (ret)
2180        put_futex_key(&q->key);
2181    return ret;
2182}
2183
2184static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2185              ktime_t *abs_time, u32 bitset)
2186{
2187    struct hrtimer_sleeper timeout, *to = NULL;
2188    struct restart_block *restart;
2189    struct futex_hash_bucket *hb;
2190    struct futex_q q = futex_q_init;
2191    int ret;
2192
2193    if (!bitset)
2194        return -EINVAL;
2195    q.bitset = bitset;
2196
2197    if (abs_time) {
2198        to = &timeout;
2199
2200        hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2201                      CLOCK_REALTIME : CLOCK_MONOTONIC,
2202                      HRTIMER_MODE_ABS);
2203        hrtimer_init_sleeper(to, current);
2204        hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2205                         current->timer_slack_ns);
2206    }
2207
2208retry:
2209    /*
2210     * Prepare to wait on uaddr. On success, holds hb lock and increments
2211     * q.key refs.
2212     */
2213    ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2214    if (ret)
2215        goto out;
2216
2217    /* queue_me and wait for wakeup, timeout, or a signal. */
2218    futex_wait_queue_me(hb, &q, to);
2219
2220    /* If we were woken (and unqueued), we succeeded, whatever. */
2221    ret = 0;
2222    /* unqueue_me() drops q.key ref */
2223    if (!unqueue_me(&q))
2224        goto out;
2225    ret = -ETIMEDOUT;
2226    if (to && !to->task)
2227        goto out;
2228
2229    /*
2230     * We expect signal_pending(current), but we might be the
2231     * victim of a spurious wakeup as well.
2232     */
2233    if (!signal_pending(current))
2234        goto retry;
2235
2236    ret = -ERESTARTSYS;
2237    if (!abs_time)
2238        goto out;
2239
2240    restart = &current_thread_info()->restart_block;
2241    restart->fn = futex_wait_restart;
2242    restart->futex.uaddr = uaddr;
2243    restart->futex.val = val;
2244    restart->futex.time = abs_time->tv64;
2245    restart->futex.bitset = bitset;
2246    restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2247
2248    ret = -ERESTART_RESTARTBLOCK;
2249
2250out:
2251    if (to) {
2252        hrtimer_cancel(&to->timer);
2253        destroy_hrtimer_on_stack(&to->timer);
2254    }
2255    return ret;
2256}
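
/*
 * Example (user-space sketch, not part of this file): the canonical
 * pairing served by futex_wait() above and futex_wake(). futex_var and
 * the memory orders are illustrative; the syscall usage is the plain
 * FUTEX_WAIT/FUTEX_WAKE ABI:
 *
 *	Waiter:
 *		while (__atomic_load_n(&futex_var, __ATOMIC_ACQUIRE) == 0)
 *			syscall(SYS_futex, &futex_var, FUTEX_WAIT, 0,
 *				NULL, NULL, 0);
 *	Waker:
 *		__atomic_store_n(&futex_var, 1, __ATOMIC_RELEASE);
 *		syscall(SYS_futex, &futex_var, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * The loop tolerates both spurious wakeups and the -EWOULDBLOCK that
 * futex_wait_setup() returns when futex_var changed before the waiter
 * was queued.
 */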
2257
2258
2259static long futex_wait_restart(struct restart_block *restart)
2260{
2261    u32 __user *uaddr = restart->futex.uaddr;
2262    ktime_t t, *tp = NULL;
2263
2264    if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2265        t.tv64 = restart->futex.time;
2266        tp = &t;
2267    }
2268    restart->fn = do_no_restart_syscall;
2269
2270    return (long)futex_wait(uaddr, restart->futex.flags,
2271                restart->futex.val, tp, restart->futex.bitset);
2272}
2273
2274
2275/*
2276 * Userspace tried a 0 -> TID atomic transition of the futex value
2277 * and failed. The kernel side here does the whole locking operation:
2278 * if there are waiters then it will block, it does PI, etc. (Due to
2279 * races the kernel might see a 0 value of the futex too.)
2280 */
2281static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
2282             ktime_t *time, int trylock)
2283{
2284    struct hrtimer_sleeper timeout, *to = NULL;
2285    struct futex_hash_bucket *hb;
2286    struct futex_q q = futex_q_init;
2287    int res, ret;
2288
2289    if (refill_pi_state_cache())
2290        return -ENOMEM;
2291
2292    if (time) {
2293        to = &timeout;
2294        hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2295                      HRTIMER_MODE_ABS);
2296        hrtimer_init_sleeper(to, current);
2297        hrtimer_set_expires(&to->timer, *time);
2298    }
2299
2300retry:
2301    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2302    if (unlikely(ret != 0))
2303        goto out;
2304
2305retry_private:
2306    hb = queue_lock(&q);
2307
2308    ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2309    if (unlikely(ret)) {
2310        switch (ret) {
2311        case 1:
2312            /* We got the lock. */
2313            ret = 0;
2314            goto out_unlock_put_key;
2315        case -EFAULT:
2316            goto uaddr_faulted;
2317        case -EAGAIN:
2318            /*
2319             * Task is exiting and we just wait for the
2320             * exit to complete.
2321             */
2322            queue_unlock(hb);
2323            put_futex_key(&q.key);
2324            cond_resched();
2325            goto retry;
2326        default:
2327            goto out_unlock_put_key;
2328        }
2329    }
2330
2331    /*
2332     * Only actually queue now that the atomic ops are done:
2333     */
2334    queue_me(&q, hb);
2335
2336    WARN_ON(!q.pi_state);
2337    /*
2338     * Block on the PI mutex:
2339     */
2340    if (!trylock)
2341        ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
2342    else {
2343        ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2344        /* Fixup the trylock return value: */
2345        ret = ret ? 0 : -EWOULDBLOCK;
2346    }
2347
2348    spin_lock(q.lock_ptr);
2349    /*
2350     * Fixup the pi_state owner and possibly acquire the lock if we
2351     * haven't already.
2352     */
2353    res = fixup_owner(uaddr, &q, !ret);
2354    /*
2355     * If fixup_owner() returned an error, propagate that. If it acquired
2356     * the lock, clear our -ETIMEDOUT or -EINTR.
2357     */
2358    if (res)
2359        ret = (res < 0) ? res : 0;
2360
2361    /*
2362     * If fixup_owner() faulted and was unable to handle the fault, unlock
2363     * it and return the fault to userspace.
2364     */
2365    if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2366        rt_mutex_unlock(&q.pi_state->pi_mutex);
2367
2368    /* Unqueue and drop the lock */
2369    unqueue_me_pi(&q);
2370
2371    goto out_put_key;
2372
2373out_unlock_put_key:
2374    queue_unlock(hb);
2375
2376out_put_key:
2377    put_futex_key(&q.key);
2378out:
2379    if (to)
2380        destroy_hrtimer_on_stack(&to->timer);
2381    return ret != -EINTR ? ret : -ERESTARTNOINTR;
2382
2383uaddr_faulted:
2384    queue_unlock(hb);
2385
2386    ret = fault_in_user_writeable(uaddr);
2387    if (ret)
2388        goto out_put_key;
2389
2390    if (!(flags & FLAGS_SHARED))
2391        goto retry_private;
2392
2393    put_futex_key(&q.key);
2394    goto retry;
2395}
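
/*
 * Example (user-space sketch, not part of this file): the fast path that
 * precedes the slow path above. lock is an illustrative futex word and
 * tid the caller's thread id as returned by gettid():
 *
 *	uint32_t expected = 0;
 *
 *	if (!__atomic_compare_exchange_n(&lock, &expected, tid, 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * Only when the 0 -> TID transition fails does the kernel get involved,
 * which is the situation futex_lock_pi() handles.
 */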
2396
2397/*
2398 * Userspace attempted a TID -> 0 atomic transition, and failed.
2399 * This is the in-kernel slowpath: we look up the PI state (if any),
2400 * and do the rt-mutex unlock.
2401 */
2402static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2403{
2404    struct futex_hash_bucket *hb;
2405    struct futex_q *this, *next;
2406    union futex_key key = FUTEX_KEY_INIT;
2407    u32 uval, vpid = task_pid_vnr(current);
2408    int ret;
2409
2410retry:
2411    if (get_user(uval, uaddr))
2412        return -EFAULT;
2413    /*
2414     * We release only a lock we actually own:
2415     */
2416    if ((uval & FUTEX_TID_MASK) != vpid)
2417        return -EPERM;
2418
2419    ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2420    if (unlikely(ret != 0))
2421        goto out;
2422
2423    hb = hash_futex(&key);
2424    spin_lock(&hb->lock);
2425
2426    /*
2427     * To avoid races, try to do the TID -> 0 atomic transition
2428     * again. If it succeeds then we can return without waking
2429     * anyone else up. We only try this if neither the waiters nor
2430     * the owner died bit are set.
2431     */
2432    if (!(uval & ~FUTEX_TID_MASK) &&
2433        cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2434        goto pi_faulted;
2435    /*
2436     * Rare case: we managed to release the lock atomically,
2437     * no need to wake anyone else up:
2438     */
2439    if (unlikely(uval == vpid))
2440        goto out_unlock;
2441
2442    /*
2443     * Ok, other tasks may need to be woken up - check waiters
2444     * and do the wakeup if necessary:
2445     */
2446    plist_for_each_entry_safe(this, next, &hb->chain, list) {
2447        if (!match_futex(&this->key, &key))
2448            continue;
2449        ret = wake_futex_pi(uaddr, uval, this);
2450        /*
2451         * The atomic access to the futex value
2452         * generated a pagefault, so retry the
2453         * user-access and the wakeup:
2454         */
2455        if (ret == -EFAULT)
2456            goto pi_faulted;
2457        goto out_unlock;
2458    }
2459    /*
2460     * No waiters - kernel unlocks the futex:
2461     */
2462    ret = unlock_futex_pi(uaddr, uval);
2463    if (ret == -EFAULT)
2464        goto pi_faulted;
2465
2466out_unlock:
2467    spin_unlock(&hb->lock);
2468    put_futex_key(&key);
2469
2470out:
2471    return ret;
2472
2473pi_faulted:
2474    spin_unlock(&hb->lock);
2475    put_futex_key(&key);
2476
2477    ret = fault_in_user_writeable(uaddr);
2478    if (!ret)
2479        goto retry;
2480
2481    return ret;
2482}
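
/*
 * Example (user-space sketch, not part of this file): the matching
 * unlock fast path. The TID -> 0 transition can only succeed while no
 * kernel state is attached; once FUTEX_WAITERS is set the slow path
 * above must run so the rt_mutex is unlocked and the top waiter takes
 * over ownership:
 *
 *	uint32_t expected = tid;
 *
 *	if (!__atomic_compare_exchange_n(&lock, &expected, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */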
2483
2484/**
2485 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2486 * @hb: the hash_bucket the futex_q was originally enqueued on
2487 * @q: the futex_q woken while waiting to be requeued
2488 * @key2: the futex_key of the requeue target futex
2489 * @timeout: the timeout associated with the wait (NULL if none)
2490 *
2491 * Detect if the task was woken on the initial futex as opposed to the requeue
2492 * target futex. If so, determine if it was a timeout or a signal that caused
2493 * the wakeup and return the appropriate error code to the caller. Must be
2494 * called with the hb lock held.
2495 *
2496 * Return:
2497 * 0 = no early wakeup detected;
2498 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
2499 */
2500static inline
2501int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2502                   struct futex_q *q, union futex_key *key2,
2503                   struct hrtimer_sleeper *timeout)
2504{
2505    int ret = 0;
2506
2507    /*
2508     * With the hb lock held, we avoid races while we process the wakeup.
2509     * We only need to hold hb (and not hb2) to ensure atomicity as the
2510     * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2511     * It can't be requeued from uaddr2 to something else since we don't
2512     * support a PI aware source futex for requeue.
2513     */
2514    if (!match_futex(&q->key, key2)) {
2515        WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2516        /*
2517         * We were woken prior to requeue by a timeout or a signal.
2518         * Unqueue the futex_q and determine which it was.
2519         */
2520        plist_del(&q->list, &hb->chain);
2521        hb_waiters_dec(hb);
2522
2523        /* Handle spurious wakeups gracefully */
2524        ret = -EWOULDBLOCK;
2525        if (timeout && !timeout->task)
2526            ret = -ETIMEDOUT;
2527        else if (signal_pending(current))
2528            ret = -ERESTARTNOINTR;
2529    }
2530    return ret;
2531}
2532
2533/**
2534 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2535 * @uaddr: the futex we initially wait on (non-pi)
2536 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2537 * the same type, no requeueing from private to shared, etc.
2538 * @val: the expected value of uaddr
2539 * @abs_time: absolute timeout
2540 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2541 * @uaddr2: the pi futex we will take prior to returning to user-space
2542 *
2543 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2544 * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
2545 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2546 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2547 * without one, the pi logic would not know which task to boost/deboost, if
2548 * there was a need to.
2549 *
2550 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2551 * via the following:
2552 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2553 * 2) wakeup on uaddr2 after a requeue
2554 * 3) signal
2555 * 4) timeout
2556 *
2557 * If 3, clean up and return -ERESTARTNOINTR.
2558 *
2559 * If 2, we may then block on trying to take the rt_mutex and return via:
2560 * 5) successful lock
2561 * 6) signal
2562 * 7) timeout
2563 * 8) other lock acquisition failure
2564 *
2565 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2566 *
2567 * If 4 or 7, we clean up and return -ETIMEDOUT.
2568 *
2569 * Return:
2570 * 0 - On success;
2571 * <0 - On error
2572 */
2573static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2574                 u32 val, ktime_t *abs_time, u32 bitset,
2575                 u32 __user *uaddr2)
2576{
2577    struct hrtimer_sleeper timeout, *to = NULL;
2578    struct rt_mutex_waiter rt_waiter;
2579    struct rt_mutex *pi_mutex = NULL;
2580    struct futex_hash_bucket *hb;
2581    union futex_key key2 = FUTEX_KEY_INIT;
2582    struct futex_q q = futex_q_init;
2583    int res, ret;
2584
2585    if (uaddr == uaddr2)
2586        return -EINVAL;
2587
2588    if (!bitset)
2589        return -EINVAL;
2590
2591    if (abs_time) {
2592        to = &timeout;
2593        hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2594                      CLOCK_REALTIME : CLOCK_MONOTONIC,
2595                      HRTIMER_MODE_ABS);
2596        hrtimer_init_sleeper(to, current);
2597        hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2598                         current->timer_slack_ns);
2599    }
2600
2601    /*
2602     * The waiter is allocated on our stack, manipulated by the requeue
2603     * code while we sleep on uaddr.
2604     */
2605    debug_rt_mutex_init_waiter(&rt_waiter);
2606    RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2607    RB_CLEAR_NODE(&rt_waiter.tree_entry);
2608    rt_waiter.task = NULL;
2609
2610    ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2611    if (unlikely(ret != 0))
2612        goto out;
2613
2614    q.bitset = bitset;
2615    q.rt_waiter = &rt_waiter;
2616    q.requeue_pi_key = &key2;
2617
2618    /*
2619     * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2620     * count.
2621     */
2622    ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2623    if (ret)
2624        goto out_key2;
2625
2626    /*
2627     * The check above which compares uaddrs is not sufficient for
2628     * shared futexes. We need to compare the keys:
2629     */
2630    if (match_futex(&q.key, &key2)) {
2631        ret = -EINVAL;
2632        goto out_put_keys;
2633    }
2634
2635    /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2636    futex_wait_queue_me(hb, &q, to);
2637
2638    spin_lock(&hb->lock);
2639    ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2640    spin_unlock(&hb->lock);
2641    if (ret)
2642        goto out_put_keys;
2643
2644    /*
2645     * In order for us to be here, we know our q.key == key2, and since
2646     * we took the hb->lock above, we also know that futex_requeue() has
2647     * completed and we no longer have to concern ourselves with a wakeup
2648     * race with the atomic proxy lock acquisition by the requeue code. The
2649     * futex_requeue dropped our key1 reference and incremented our key2
2650     * reference count.
2651     */
2652
2653    /* Check if the requeue code acquired the second futex for us. */
2654    if (!q.rt_waiter) {
2655        /*
2656         * Got the lock. We might not be the anticipated owner if we
2657         * did a lock-steal - fix up the PI-state in that case.
2658         */
2659        if (q.pi_state && (q.pi_state->owner != current)) {
2660            spin_lock(q.lock_ptr);
2661            ret = fixup_pi_state_owner(uaddr2, &q, current);
2662            spin_unlock(q.lock_ptr);
2663        }
2664    } else {
2665        /*
2666         * We have been woken up by futex_unlock_pi(), a timeout, or a
2667         * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2668         * the pi_state.
2669         */
2670        WARN_ON(!q.pi_state);
2671        pi_mutex = &q.pi_state->pi_mutex;
2672        ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2673        debug_rt_mutex_free_waiter(&rt_waiter);
2674
2675        spin_lock(q.lock_ptr);
2676        /*
2677         * Fixup the pi_state owner and possibly acquire the lock if we
2678         * haven't already.
2679         */
2680        res = fixup_owner(uaddr2, &q, !ret);
2681        /*
2682         * If fixup_owner() returned an error, propagate that. If it
2683         * acquired the lock, clear -ETIMEDOUT or -EINTR.
2684         */
2685        if (res)
2686            ret = (res < 0) ? res : 0;
2687
2688        /* Unqueue and drop the lock. */
2689        unqueue_me_pi(&q);
2690    }
2691
2692    /*
2693     * If fixup_pi_state_owner() faulted and was unable to handle the
2694     * fault, unlock the rt_mutex and return the fault to userspace.
2695     */
2696    if (ret == -EFAULT) {
2697        if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2698            rt_mutex_unlock(pi_mutex);
2699    } else if (ret == -EINTR) {
2700        /*
2701         * We've already been requeued, but cannot restart by calling
2702         * futex_lock_pi() directly. We could restart this syscall, but
2703         * it would detect that the user space "val" changed and return
2704         * -EWOULDBLOCK. Save the overhead of the restart and return
2705         * -EWOULDBLOCK directly.
2706         */
2707        ret = -EWOULDBLOCK;
2708    }
2709
2710out_put_keys:
2711    put_futex_key(&q.key);
2712out_key2:
2713    put_futex_key(&key2);
2714
2715out:
2716    if (to) {
2717        hrtimer_cancel(&to->timer);
2718        destroy_hrtimer_on_stack(&to->timer);
2719    }
2720    return ret;
2721}
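
/*
 * Example (user-space sketch, not part of this file): the pairing a
 * PI-aware condition variable would use, with cond and mutex as
 * illustrative futex words and cond_val the value of cond read under
 * the mutex:
 *
 *	Waiter (pthread_cond_wait() style):
 *		syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, cond_val,
 *			NULL, &mutex, 0);
 *	Signaler (pthread_cond_signal() style):
 *		syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1,
 *			NULL, &mutex, cond_val);
 *
 * On success the waiter returns owning mutex, having been requeued and
 * woken through the rt_mutex machinery above.
 */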
2722
2723/*
2724 * Support for robust futexes: the kernel cleans up held futexes at
2725 * thread exit time.
2726 *
2727 * Implementation: user-space maintains a per-thread list of locks it
2728 * is holding. Upon do_exit(), the kernel carefully walks this list,
2729 * and marks all locks that are owned by this thread with the
2730 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2731 * always manipulated with the lock held, so the list is private and
2732 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2733 * field, to allow the kernel to clean up if the thread dies after
2734 * acquiring the lock, but just before it could have added itself to
2735 * the list. There can only be one such pending lock.
2736 */
2737
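/*
 * Example (user-space sketch, not part of this file): the registration a
 * thread performs once at start-up. The structures are the real ones
 * from <linux/futex.h>; struct my_lock is an illustrative lock type
 * whose list entry sits futex_offset bytes before its futex word:
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next = &head.list;		// empty list
 *	head.futex_offset = offsetof(struct my_lock, futex) -
 *			    offsetof(struct my_lock, list);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * After this call user space links and unlinks lock entries itself; the
 * kernel only walks the list in exit_robust_list() below.
 */
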
2738/**
2739 * sys_set_robust_list() - Set the robust-futex list head of a task
2740 * @head: pointer to the list-head
2741 * @len: length of the list-head, as userspace expects
2742 */
2743SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2744        size_t, len)
2745{
2746    if (!futex_cmpxchg_enabled)
2747        return -ENOSYS;
2748    /*
2749     * The kernel knows only one size for now:
2750     */
2751    if (unlikely(len != sizeof(*head)))
2752        return -EINVAL;
2753
2754    current->robust_list = head;
2755
2756    return 0;
2757}
2758
2759/**
2760 * sys_get_robust_list() - Get the robust-futex list head of a task
2761 * @pid: pid of the process [zero for current task]
2762 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2763 * @len_ptr: pointer to a length field, the kernel fills in the header size
2764 */
2765SYSCALL_DEFINE3(get_robust_list, int, pid,
2766        struct robust_list_head __user * __user *, head_ptr,
2767        size_t __user *, len_ptr)
2768{
2769    struct robust_list_head __user *head;
2770    unsigned long ret;
2771    struct task_struct *p;
2772
2773    if (!futex_cmpxchg_enabled)
2774        return -ENOSYS;
2775
2776    rcu_read_lock();
2777
2778    ret = -ESRCH;
2779    if (!pid)
2780        p = current;
2781    else {
2782        p = find_task_by_vpid(pid);
2783        if (!p)
2784            goto err_unlock;
2785    }
2786
2787    ret = -EPERM;
2788    if (!ptrace_may_access(p, PTRACE_MODE_READ))
2789        goto err_unlock;
2790
2791    head = p->robust_list;
2792    rcu_read_unlock();
2793
2794    if (put_user(sizeof(*head), len_ptr))
2795        return -EFAULT;
2796    return put_user(head, head_ptr);
2797
2798err_unlock:
2799    rcu_read_unlock();
2800
2801    return ret;
2802}
2803
2804/*
2805 * Process a futex-list entry, check whether it's owned by the
2806 * dying task, and do notification if so:
2807 */
2808int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2809{
2810    u32 uval, uninitialized_var(nval), mval;
2811
2812retry:
2813    if (get_user(uval, uaddr))
2814        return -1;
2815
2816    if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2817        /*
2818         * Ok, this dying thread is truly holding a futex
2819         * of interest. Set the OWNER_DIED bit atomically
2820         * via cmpxchg, and if the value had FUTEX_WAITERS
2821         * set, wake up a waiter (if any). (We have to do a
2822         * futex_wake() even if OWNER_DIED is already set -
2823         * to handle the rare but possible case of recursive
2824         * thread-death.) The rest of the cleanup is done in
2825         * userspace.
2826         */
2827        mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2828        /*
2829         * We are not holding a lock here, but we want to have
2830         * the pagefault_disable/enable() protection because
2831         * we want to handle the fault gracefully. If the
2832         * access fails we try to fault in the futex with R/W
2833         * verification via get_user_pages. get_user() above
2834         * does not guarantee R/W access. If that fails we
2835         * give up and leave the futex locked.
2836         */
2837        if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2838            if (fault_in_user_writeable(uaddr))
2839                return -1;
2840            goto retry;
2841        }
2842        if (nval != uval)
2843            goto retry;
2844
2845        /*
2846         * Wake robust non-PI futexes here. The wakeup of
2847         * PI futexes happens in exit_pi_state():
2848         */
2849        if (!pi && (uval & FUTEX_WAITERS))
2850            futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2851    }
2852    return 0;
2853}
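
/*
 * Example (user-space sketch, not part of this file): what the next
 * locker does with the bit set above, mirroring the EOWNERDEAD protocol
 * of robust pthread mutexes. lock, tid and recover_state() are
 * illustrative:
 *
 *	uint32_t val = __atomic_load_n(&lock, __ATOMIC_RELAXED);
 *
 *	if (val & FUTEX_OWNER_DIED &&
 *	    __atomic_compare_exchange_n(&lock, &val, tid, 0,
 *					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		recover_state();	// lock is held but state is suspect
 */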
2854
2855/*
2856 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2857 */
2858static inline int fetch_robust_entry(struct robust_list __user **entry,
2859                     struct robust_list __user * __user *head,
2860                     unsigned int *pi)
2861{
2862    unsigned long uentry;
2863
2864    if (get_user(uentry, (unsigned long __user *)head))
2865        return -EFAULT;
2866
2867    *entry = (void __user *)(uentry & ~1UL);
2868    *pi = uentry & 1;
2869
2870    return 0;
2871}
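
/*
 * User space creates these tagged pointers when it links a PI lock into
 * the robust list, e.g. (illustrative):
 *
 *	entry->next = (struct robust_list *)((uintptr_t)next | 1);
 *
 * which is why the low bit is masked off above and reported via *pi.
 */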
2872
2873/*
2874 * Walk curr->robust_list (very carefully, it's a userspace list!)
2875 * and mark any locks found there dead, and notify any waiters.
2876 *
2877 * We silently return on any sign of list-walking problem.
2878 */
2879void exit_robust_list(struct task_struct *curr)
2880{
2881    struct robust_list_head __user *head = curr->robust_list;
2882    struct robust_list __user *entry, *next_entry, *pending;
2883    unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2884    unsigned int uninitialized_var(next_pi);
2885    unsigned long futex_offset;
2886    int rc;
2887
2888    if (!futex_cmpxchg_enabled)
2889        return;
2890
2891    /*
2892     * Fetch the list head (which was registered earlier, via
2893     * sys_set_robust_list()):
2894     */
2895    if (fetch_robust_entry(&entry, &head->list.next, &pi))
2896        return;
2897    /*
2898     * Fetch the relative futex offset:
2899     */
2900    if (get_user(futex_offset, &head->futex_offset))
2901        return;
2902    /*
2903     * Fetch any possibly pending lock-add first, and handle it
2904     * if it exists:
2905     */
2906    if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2907        return;
2908
2909    next_entry = NULL; /* avoid warning with gcc */
2910    while (entry != &head->list) {
2911        /*
2912         * Fetch the next entry in the list before calling
2913         * handle_futex_death:
2914         */
2915        rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2916        /*
2917         * A pending lock might already be on the list, so
2918         * don't process it twice:
2919         */
2920        if (entry != pending)
2921            if (handle_futex_death((void __user *)entry + futex_offset,
2922                        curr, pi))
2923                return;
2924        if (rc)
2925            return;
2926        entry = next_entry;
2927        pi = next_pi;
2928        /*
2929         * Avoid excessively long or circular lists:
2930         */
2931        if (!--limit)
2932            break;
2933
2934        cond_resched();
2935    }
2936
2937    if (pending)
2938        handle_futex_death((void __user *)pending + futex_offset,
2939                   curr, pip);
2940}
2941
2942long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2943        u32 __user *uaddr2, u32 val2, u32 val3)
2944{
2945    int cmd = op & FUTEX_CMD_MASK;
2946    unsigned int flags = 0;
2947
2948    if (!(op & FUTEX_PRIVATE_FLAG))
2949        flags |= FLAGS_SHARED;
2950
2951    if (op & FUTEX_CLOCK_REALTIME) {
2952        flags |= FLAGS_CLOCKRT;
2953        if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2954            return -ENOSYS;
2955    }
2956
2957    switch (cmd) {
2958    case FUTEX_LOCK_PI:
2959    case FUTEX_UNLOCK_PI:
2960    case FUTEX_TRYLOCK_PI:
2961    case FUTEX_WAIT_REQUEUE_PI:
2962    case FUTEX_CMP_REQUEUE_PI:
2963        if (!futex_cmpxchg_enabled)
2964            return -ENOSYS;
2965    }
2966
2967    switch (cmd) {
2968    case FUTEX_WAIT:
2969        val3 = FUTEX_BITSET_MATCH_ANY;
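        /* fall through */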
2970    case FUTEX_WAIT_BITSET:
2971        return futex_wait(uaddr, flags, val, timeout, val3);
2972    case FUTEX_WAKE:
2973        val3 = FUTEX_BITSET_MATCH_ANY;
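        /* fall through */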
2974    case FUTEX_WAKE_BITSET:
2975        return futex_wake(uaddr, flags, val, val3);
2976    case FUTEX_REQUEUE:
2977        return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2978    case FUTEX_CMP_REQUEUE:
2979        return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2980    case FUTEX_WAKE_OP:
2981        return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2982    case FUTEX_LOCK_PI:
2983        return futex_lock_pi(uaddr, flags, val, timeout, 0);
2984    case FUTEX_UNLOCK_PI:
2985        return futex_unlock_pi(uaddr, flags);
2986    case FUTEX_TRYLOCK_PI:
2987        return futex_lock_pi(uaddr, flags, 0, timeout, 1);
2988    case FUTEX_WAIT_REQUEUE_PI:
2989        val3 = FUTEX_BITSET_MATCH_ANY;
2990        return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2991                         uaddr2);
2992    case FUTEX_CMP_REQUEUE_PI:
2993        return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2994    }
2995    return -ENOSYS;
2996}
2997
2998
2999SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3000        struct timespec __user *, utime, u32 __user *, uaddr2,
3001        u32, val3)
3002{
3003    struct timespec ts;
3004    ktime_t t, *tp = NULL;
3005    u32 val2 = 0;
3006    int cmd = op & FUTEX_CMD_MASK;
3007
3008    if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3009              cmd == FUTEX_WAIT_BITSET ||
3010              cmd == FUTEX_WAIT_REQUEUE_PI)) {
3011        if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3012            return -EFAULT;
3013        if (!timespec_valid(&ts))
3014            return -EINVAL;
3015
3016        t = timespec_to_ktime(ts);
3017        if (cmd == FUTEX_WAIT)
3018            t = ktime_add_safe(ktime_get(), t);
3019        tp = &t;
3020    }
3021    /*
3022     * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3023     * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3024     */
3025    if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3026        cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3027        val2 = (u32) (unsigned long) utime;
3028
3029    return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3030}
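
/*
 * Example (user-space sketch, not part of this file): glibc provides no
 * futex() wrapper, so callers typically define their own. Note how the
 * utime argument slot doubles as val2 for the requeue and wake-op
 * commands, exactly as decoded above:
 *
 *	static long futex(uint32_t *uaddr, int op, uint32_t val,
 *			  const struct timespec *utime,
 *			  uint32_t *uaddr2, uint32_t val3)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, utime,
 *			       uaddr2, val3);
 *	}
 */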
3031
3032static void __init futex_detect_cmpxchg(void)
3033{
3034#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3035    u32 curval;
3036
3037    /*
3038     * This will fail and that's what we want. Some arch implementations do
3039     * runtime detection of the futex_atomic_cmpxchg_inatomic()
3040     * functionality. We want to know that before we call in any
3041     * of the complex code paths. Also we want to prevent
3042     * registration of robust lists in that case. NULL is
3043     * guaranteed to fault and we get -EFAULT on functional
3044     * implementation, the non-functional ones will return
3045     * -ENOSYS.
3046     */
3047    if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3048        futex_cmpxchg_enabled = 1;
3049#endif
3050}
3051
3052static int __init futex_init(void)
3053{
3054    unsigned int futex_shift;
3055    unsigned long i;
3056
3057#if CONFIG_BASE_SMALL
3058    futex_hashsize = 16;
3059#else
3060    futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3061#endif
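
    /*
     * Worked example of the sizing above: with 16 possible CPUs the
     * default branch asks for roundup_pow_of_two(256 * 16) = 4096
     * buckets; alloc_large_system_hash() below reports the power of
     * two it actually used back through futex_shift.
     */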
3062
3063    futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3064                           futex_hashsize, 0,
3065                           futex_hashsize < 256 ? HASH_SMALL : 0,
3066                           &futex_shift, NULL,
3067                           futex_hashsize, futex_hashsize);
3068    futex_hashsize = 1UL << futex_shift;
3069
3070    futex_detect_cmpxchg();
3071
3072    for (i = 0; i < futex_hashsize; i++) {
3073        atomic_set(&futex_queues[i].waiters, 0);
3074        plist_head_init(&futex_queues[i].chain);
3075        spin_lock_init(&futex_queues[i].lock);
3076    }
3077
3078    return 0;
3079}
3080__initcall(futex_init);
3081
