/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @task: the task waiting on the futex
 * @lock_ptr: the hash bucket lock
 * @key: the key the futex is hashed on
 * @pi_state: optional priority inheritance state
 * @rt_waiter: rt_waiter storage for use with requeue_pi
 * @requeue_pi_key: the requeue_pi target futex key
 * @bitset: bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash
 * list via the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	if (err < 0)
		return err;

	page = compound_head(page);
	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}
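
/*
 * Illustrative key contents produced above (a sketch of the three cases,
 * not new code; see union futex_key):
 *
 *	PROCESS_PRIVATE:	{ current->mm, aligned uaddr, offset }
 *	shared anonymous:	{ current->mm, aligned uaddr, offset | FUT_OFF_MMSHARED }
 *	shared file-backed:	{ inode, page->index, offset | FUT_OFF_INODE }
 */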

static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr: pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address.  We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb: the hash bucket the futex_q's reside in
 * @key: the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}
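
/*
 * Typical pi_state cache life cycle (an illustration of the helpers
 * above, not a new API):
 *
 *	refill_pi_state_cache();	- may sleep; call before taking hb locks
 *	...
 *	pi_state = alloc_pi_state();	- cannot fail once the cache is filled
 *	...
 *	free_pi_state(pi_state);	- on the last ref the object is recycled
 *					  into the cache, or kfree()d if the
 *					  cache is already occupied
 */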

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;
	const struct cred *cred = current_cred(), *pcred;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p) {
		p = ERR_PTR(-ESRCH);
	} else {
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid)
			p = ERR_PTR(-ESRCH);
		else
			get_task_struct(p);
	}

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr: the pi futex user address
 * @hb: the pi futex hash bucket
 * @key: the futex key associated with uaddr and hb
 * @ps: the pi_state pointer where we store the result of the lookup
 * @task: the task to perform the atomic lock work for.  This will
 *        be "current" except in the case of requeue pi.
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval;

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(task);
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases where a futex might have no owner (the
	 * owner TID is 0): the OWNER_DIED bit is already set, or the
	 * robust-futex exit code cleared the TID. We take over the
	 * futex in either case, and also unconditionally when a prior
	 * lookup told us the owner died (ownerdied).
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to an owner-died takeover.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}
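
/*
 * Illustrative uaddr transitions performed by futex_lock_pi_atomic(),
 * where TID is task_pid_vnr(task) (a sketch, not an exhaustive list):
 *
 *	0		-> TID			uncontended; returns 1
 *	OTHERTID	-> OTHERTID | WAITERS	contended; returns 0 after
 *						lookup_pi_state()
 *	OWNER_DIED | 0	-> OWNER_DIED | TID	takeover; returns 1
 */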

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	plist_del(&q->list, &q->list.plist);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}
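
/*
 * The waker above and the waiter in unqueue_me() pair up like a lock-free
 * handshake (an illustration of the barrier comment above):
 *
 *	wake_futex()				unqueue_me()
 *	------------				------------
 *	plist_del(&q->list, ...);		lock_ptr = q->lock_ptr;
 *	smp_wmb();				barrier();
 *	q->lock_ptr = NULL;			if (lock_ptr) { ...recheck... }
 *
 * Once the waiter observes q->lock_ptr == NULL it may free the futex_q,
 * which is why the waker must not touch *q after the NULL store.
 */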

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. That way we know that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit does not need to be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);
out:
	return ret;
}
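
/*
 * Bitset matching above is a plain AND (illustration only): a waiter
 * queued via FUTEX_WAIT_BITSET with q->bitset == 0x1 is woken by any
 * FUTEX_WAKE_BITSET mask with bit 0 set, while plain FUTEX_WAKE passes
 * FUTEX_BITSET_MATCH_ANY (all bits set) and thus matches every waiter.
 */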

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &key2);
		put_futex_key(fshared, &key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q: the futex_q to requeue
 * @hb1: the source hash_bucket
 * @hb2: the target hash_bucket
 * @key2: the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{
	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
		q->list.plist.lock = &hb2->lock;
#endif
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q: the futex_q
 * @key: the key of the requeue target futex
 * @hb: the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex: the user address of the to futex
 * @hb1: the from futex hash bucket, must be locked by the caller
 * @hb2: the to futex hash bucket, must be locked by the caller
 * @key1: the from futex key
 * @key2: the to futex key
 * @ps: address to store the pi_state pointer
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1)
		requeue_pi_wake_futex(top_waiter, key2, hb2);

	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1: source futex user address
 * @fshared: whether the futexes are shared (1) or not (0)
 * @uaddr2: target futex user address
 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 * @cmpval: expected value at uaddr1, or NULL to skip the check
 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 *              pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval,
			 int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	u32 curval2;

	if (requeue_pi) {
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!fshared)
				goto retry_private;

			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here, where we are able to easily handle
		 * faults, rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it. If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it.
		 */
		if (ret == 1) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			ret = get_futex_value_locked(&curval2, uaddr2);
			if (!ret)
				ret = lookup_pi_state(curval2, hb2, &key2,
						      &pi_state);
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			wake_futex(this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task, 1);
			if (ret == 1) {
				/* We got the lock. */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/* -EDEADLK */
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
}
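
/*
 * Typical use from userspace (an illustration; field names hypothetical):
 * glibc's pthread_cond_broadcast() wakes one waiter and requeues the rest
 * onto the futex of the associated mutex, so they wake one at a time as
 * the mutex is handed on instead of stampeding:
 *
 *	futex(&cond->futex, FUTEX_CMP_REQUEUE, 1, INT_MAX,
 *	      &mutex->futex, cond->futex_val);
 */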

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q: The futex_q to enqueue
 * @hb: The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here.  A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI-related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the
 * unqueue state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}
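
/*
 * Worked example of the priority rule above (illustration only): an RT
 * waiter with normal_prio 10 is queued at plist priority 10 and is woken
 * ahead of all SCHED_NORMAL waiters, which all queue at MAX_RT_PRIO and
 * therefore wake in FIFO order among themselves.
 */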

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q: The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *  1 - if the futex_q was still queued (and we unqueued it)
 *  0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with the hash bucket lock held and mm->mmap_sem held
 * for non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner, int fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode in the 'flags' shared capability
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr: user address of the futex
 * @fshared: whether the futex is shared (1) or not (0)
 * @q: futex_q (contains pi_state and access to the rt_mutex)
 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
		       int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current, fshared);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late. We can access the rt_mutex_owner without
		 * locking, as the other task is now blocked on the hash bucket
		 * lock. Fix the state up.
		 */
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner, nor the pending owner, of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
		       "pi-state %p\n", ret,
		       q->pi_state->pi_mutex.owner,
		       q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb: the futex hash bucket, must be locked by the caller
 * @q: the futex_q to queue up on
 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}
1719 | |
1720 | /** |
1721 | * futex_wait_setup() - Prepare to wait on a futex |
1722 | * @uaddr: the futex userspace address |
1723 | * @val: the expected value |
1724 | * @fshared: whether the futex is shared (1) or not (0) |
1725 | * @q: the associated futex_q |
1726 | * @hb: storage for hash_bucket pointer to be returned to caller |
1727 | * |
1728 | * Set up the futex_q and locate the hash_bucket. Get the futex value and |
1729 | * compare it with the expected value. Handle atomic faults internally. |
1730 | * Return with the hb lock held and a q.key reference on success, and unlocked |
1731 | * with no q.key reference on failure. |
1732 | * |
1733 | * Returns: |
1734 | * 0 - uaddr contains val and hb has been locked |
1735 | * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked |
1736 | */ |
1737 | static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared, |
1738 | struct futex_q *q, struct futex_hash_bucket **hb) |
1739 | { |
1740 | u32 uval; |
1741 | int ret; |
1742 | |
1743 | /* |
1744 | * Access the page AFTER the hash-bucket is locked. |
1745 | * Order is important: |
1746 | * |
1747 | * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); |
1748 | * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } |
1749 | * |
1750 | * The basic logical guarantee of a futex is that it blocks ONLY |
1751 | * if cond(var) is known to be true at the time of blocking, for |
1752 | * any cond. If we queued after testing *uaddr, that would open |
1753 | * a race condition where we could block indefinitely with |
1754 | * cond(var) false, which would violate the guarantee. |
1755 | * |
1756 | * A consequence is that futex_wait() can return zero and absorb |
1757 | * a wakeup when *uaddr != val on entry to the syscall. This is |
1758 | * rare, but normal. |
1759 | */ |
1760 | retry: |
1761 | q->key = FUTEX_KEY_INIT; |
1762 | ret = get_futex_key(uaddr, fshared, &q->key); |
1763 | if (unlikely(ret != 0)) |
1764 | return ret; |
1765 | |
1766 | retry_private: |
1767 | *hb = queue_lock(q); |
1768 | |
1769 | ret = get_futex_value_locked(&uval, uaddr); |
1770 | |
1771 | if (ret) { |
1772 | queue_unlock(q, *hb); |
1773 | |
1774 | ret = get_user(uval, uaddr); |
1775 | if (ret) |
1776 | goto out; |
1777 | |
1778 | if (!fshared) |
1779 | goto retry_private; |
1780 | |
1781 | put_futex_key(fshared, &q->key); |
1782 | goto retry; |
1783 | } |
1784 | |
1785 | if (uval != val) { |
1786 | queue_unlock(q, *hb); |
1787 | ret = -EWOULDBLOCK; |
1788 | } |
1789 | |
1790 | out: |
1791 | if (ret) |
1792 | put_futex_key(fshared, &q->key); |
1793 | return ret; |
1794 | } |
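
/*
 * Illustrative userspace counterpart of the ordering argument in
 * futex_wait_setup() above. A hedged, standalone sketch, not part of
 * this file; futex_word and the cond() test are names invented for the
 * example:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int futex_word;			// the userspace futex
 *
 *	static void waiter(void)
 *	{
 *		int val = futex_word;		// val = var
 *		if (val == 0)			// cond(val)
 *			// The kernel re-reads the word under the hash
 *			// bucket lock and blocks only if it still
 *			// equals val, so a concurrent waker is not lost.
 *			syscall(SYS_futex, &futex_word, FUTEX_WAIT,
 *				val, NULL, NULL, 0);
 *	}
 *
 *	static void waker(void)
 *	{
 *		futex_word = 1;			// var = new
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE,
 *			1, NULL, NULL, 0);
 *	}
 */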
1795 | |
1796 | static int futex_wait(u32 __user *uaddr, int fshared, |
1797 | u32 val, ktime_t *abs_time, u32 bitset, int clockrt) |
1798 | { |
1799 | struct hrtimer_sleeper timeout, *to = NULL; |
1800 | struct restart_block *restart; |
1801 | struct futex_hash_bucket *hb; |
1802 | struct futex_q q; |
1803 | int ret; |
1804 | |
1805 | if (!bitset) |
1806 | return -EINVAL; |
1807 | |
1808 | q.pi_state = NULL; |
1809 | q.bitset = bitset; |
1810 | q.rt_waiter = NULL; |
1811 | q.requeue_pi_key = NULL; |
1812 | |
1813 | if (abs_time) { |
1814 | to = &timeout; |
1815 | |
1816 | hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : |
1817 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
1818 | hrtimer_init_sleeper(to, current); |
1819 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
1820 | current->timer_slack_ns); |
1821 | } |
1822 | |
1823 | retry: |
1824 | /* Prepare to wait on uaddr. */ |
1825 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); |
1826 | if (ret) |
1827 | goto out; |
1828 | |
1829 | /* queue_me and wait for wakeup, timeout, or a signal. */ |
1830 | futex_wait_queue_me(hb, &q, to); |
1831 | |
1832 | /* If we were woken (and unqueued), we succeeded, whatever. */ |
1833 | ret = 0; |
1834 | if (!unqueue_me(&q)) |
1835 | goto out_put_key; |
1836 | ret = -ETIMEDOUT; |
1837 | if (to && !to->task) |
1838 | goto out_put_key; |
1839 | |
1840 | /* |
1841 | * We expect signal_pending(current), but we might be the |
1842 | * victim of a spurious wakeup as well. |
1843 | */ |
1844 | if (!signal_pending(current)) { |
1845 | put_futex_key(fshared, &q.key); |
1846 | goto retry; |
1847 | } |
1848 | |
1849 | ret = -ERESTARTSYS; |
1850 | if (!abs_time) |
1851 | goto out_put_key; |
1852 | |
1853 | restart = &current_thread_info()->restart_block; |
1854 | restart->fn = futex_wait_restart; |
1855 | restart->futex.uaddr = (u32 *)uaddr; |
1856 | restart->futex.val = val; |
1857 | restart->futex.time = abs_time->tv64; |
1858 | restart->futex.bitset = bitset; |
1859 | restart->futex.flags = FLAGS_HAS_TIMEOUT; |
1860 | |
1861 | if (fshared) |
1862 | restart->futex.flags |= FLAGS_SHARED; |
1863 | if (clockrt) |
1864 | restart->futex.flags |= FLAGS_CLOCKRT; |
1865 | |
1866 | ret = -ERESTART_RESTARTBLOCK; |
1867 | |
1868 | out_put_key: |
1869 | put_futex_key(fshared, &q.key); |
1870 | out: |
1871 | if (to) { |
1872 | hrtimer_cancel(&to->timer); |
1873 | destroy_hrtimer_on_stack(&to->timer); |
1874 | } |
1875 | return ret; |
1876 | } |
1877 | |
1878 | |
1879 | static long futex_wait_restart(struct restart_block *restart) |
1880 | { |
1881 | u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; |
1882 | int fshared = 0; |
1883 | ktime_t t, *tp = NULL; |
1884 | |
1885 | if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { |
1886 | t.tv64 = restart->futex.time; |
1887 | tp = &t; |
1888 | } |
1889 | restart->fn = do_no_restart_syscall; |
1890 | if (restart->futex.flags & FLAGS_SHARED) |
1891 | fshared = 1; |
1892 | return (long)futex_wait(uaddr, fshared, restart->futex.val, tp, |
1893 | restart->futex.bitset, |
1894 | restart->futex.flags & FLAGS_CLOCKRT); |
1895 | } |
1896 | |
1897 | |
1898 | /* |
1899 | * Userspace tried a 0 -> TID atomic transition of the futex value |
1900 | * and failed. The kernel side here does the whole locking operation: |
1901 | * if there are waiters then it will block, it does PI, etc. (Due to |
1902 | * races the kernel might see a 0 value of the futex too.) |
1903 | */ |
1904 | static int futex_lock_pi(u32 __user *uaddr, int fshared, |
1905 | int detect, ktime_t *time, int trylock) |
1906 | { |
1907 | struct hrtimer_sleeper timeout, *to = NULL; |
1908 | struct futex_hash_bucket *hb; |
1909 | struct futex_q q; |
1910 | int res, ret; |
1911 | |
1912 | if (refill_pi_state_cache()) |
1913 | return -ENOMEM; |
1914 | |
1915 | if (time) { |
1916 | to = &timeout; |
1917 | hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, |
1918 | HRTIMER_MODE_ABS); |
1919 | hrtimer_init_sleeper(to, current); |
1920 | hrtimer_set_expires(&to->timer, *time); |
1921 | } |
1922 | |
1923 | q.pi_state = NULL; |
1924 | q.rt_waiter = NULL; |
1925 | q.requeue_pi_key = NULL; |
1926 | retry: |
1927 | q.key = FUTEX_KEY_INIT; |
1928 | ret = get_futex_key(uaddr, fshared, &q.key); |
1929 | if (unlikely(ret != 0)) |
1930 | goto out; |
1931 | |
1932 | retry_private: |
1933 | hb = queue_lock(&q); |
1934 | |
1935 | ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); |
1936 | if (unlikely(ret)) { |
1937 | switch (ret) { |
1938 | case 1: |
1939 | /* We got the lock. */ |
1940 | ret = 0; |
1941 | goto out_unlock_put_key; |
1942 | case -EFAULT: |
1943 | goto uaddr_faulted; |
1944 | case -EAGAIN: |
1945 | /* |
1946 | * Task is exiting and we just wait for the |
1947 | * exit to complete. |
1948 | */ |
1949 | queue_unlock(&q, hb); |
1950 | put_futex_key(fshared, &q.key); |
1951 | cond_resched(); |
1952 | goto retry; |
1953 | default: |
1954 | goto out_unlock_put_key; |
1955 | } |
1956 | } |
1957 | |
1958 | /* |
1959 | * Only actually queue now that the atomic ops are done: |
1960 | */ |
1961 | queue_me(&q, hb); |
1962 | |
1963 | WARN_ON(!q.pi_state); |
1964 | /* |
1965 | * Block on the PI mutex: |
1966 | */ |
1967 | if (!trylock) |
1968 | ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); |
1969 | else { |
1970 | ret = rt_mutex_trylock(&q.pi_state->pi_mutex); |
1971 | /* Fixup the trylock return value: */ |
1972 | ret = ret ? 0 : -EWOULDBLOCK; |
1973 | } |
1974 | |
1975 | spin_lock(q.lock_ptr); |
1976 | /* |
1977 | * Fixup the pi_state owner and possibly acquire the lock if we |
1978 | * haven't already. |
1979 | */ |
1980 | res = fixup_owner(uaddr, fshared, &q, !ret); |
1981 | /* |
1982 | * If fixup_owner() returned an error, propagate that. If it acquired |
1983 | * the lock, clear our -ETIMEDOUT or -EINTR. |
1984 | */ |
1985 | if (res) |
1986 | ret = (res < 0) ? res : 0; |
1987 | |
1988 | /* |
1989 | * If fixup_owner() faulted and was unable to handle the fault, unlock |
1990 | * it and return the fault to userspace. |
1991 | */ |
1992 | if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) |
1993 | rt_mutex_unlock(&q.pi_state->pi_mutex); |
1994 | |
1995 | /* Unqueue and drop the lock */ |
1996 | unqueue_me_pi(&q); |
1997 | |
1998 | goto out_put_key; |
1999 | |
2000 | out_unlock_put_key: |
2001 | queue_unlock(&q, hb); |
2002 | |
2003 | out_put_key: |
2004 | put_futex_key(fshared, &q.key); |
2005 | out: |
2006 | if (to) |
2007 | destroy_hrtimer_on_stack(&to->timer); |
2008 | return ret != -EINTR ? ret : -ERESTARTNOINTR; |
2009 | |
2010 | uaddr_faulted: |
2011 | queue_unlock(&q, hb); |
2012 | |
2013 | ret = fault_in_user_writeable(uaddr); |
2014 | if (ret) |
2015 | goto out_put_key; |
2016 | |
2017 | if (!fshared) |
2018 | goto retry_private; |
2019 | |
2020 | put_futex_key(fshared, &q.key); |
2021 | goto retry; |
2022 | } |
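
/*
 * Hedged sketch of the userspace fast path that precedes a call into
 * futex_lock_pi(); not part of this file. pi_lock_word is an invented
 * name and the GCC __sync builtin is just one possible atomic:
 *
 *	static int pi_lock_word;	// 0 == free, else owner TID
 *
 *	static void pi_lock(void)
 *	{
 *		int tid = syscall(SYS_gettid);
 *		// Attempt the 0 -> TID transition in userspace.
 *		if (__sync_val_compare_and_swap(&pi_lock_word, 0, tid) != 0)
 *			// Contended: the kernel queues us on the
 *			// rt_mutex and applies priority inheritance.
 *			syscall(SYS_futex, &pi_lock_word, FUTEX_LOCK_PI,
 *				0, NULL, NULL, 0);
 *	}
 */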
2023 | |
2024 | /* |
2025 | * Userspace attempted a TID -> 0 atomic transition, and failed. |
2026 | * This is the in-kernel slowpath: we look up the PI state (if any), |
2027 | * and do the rt-mutex unlock. |
2028 | */ |
2029 | static int futex_unlock_pi(u32 __user *uaddr, int fshared) |
2030 | { |
2031 | struct futex_hash_bucket *hb; |
2032 | struct futex_q *this, *next; |
2033 | u32 uval; |
2034 | struct plist_head *head; |
2035 | union futex_key key = FUTEX_KEY_INIT; |
2036 | int ret; |
2037 | |
2038 | retry: |
2039 | if (get_user(uval, uaddr)) |
2040 | return -EFAULT; |
2041 | /* |
2042 | * We release only a lock we actually own: |
2043 | */ |
2044 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) |
2045 | return -EPERM; |
2046 | |
2047 | ret = get_futex_key(uaddr, fshared, &key); |
2048 | if (unlikely(ret != 0)) |
2049 | goto out; |
2050 | |
2051 | hb = hash_futex(&key); |
2052 | spin_lock(&hb->lock); |
2053 | |
2054 | /* |
2055 | * To avoid races, try to do the TID -> 0 atomic transition |
2056 | * again. If it succeeds then we can return without waking |
2057 | * anyone else up: |
2058 | */ |
2059 | if (!(uval & FUTEX_OWNER_DIED)) |
2060 | uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); |
2061 | |
2063 | if (unlikely(uval == -EFAULT)) |
2064 | goto pi_faulted; |
2065 | /* |
2066 | * Rare case: we managed to release the lock atomically, |
2067 | * no need to wake anyone else up: |
2068 | */ |
2069 | if (unlikely(uval == task_pid_vnr(current))) |
2070 | goto out_unlock; |
2071 | |
2072 | /* |
2073 | * Ok, other tasks may need to be woken up - check waiters |
2074 | * and do the wakeup if necessary: |
2075 | */ |
2076 | head = &hb->chain; |
2077 | |
2078 | plist_for_each_entry_safe(this, next, head, list) { |
2079 | if (!match_futex(&this->key, &key)) |
2080 | continue; |
2081 | ret = wake_futex_pi(uaddr, uval, this); |
2082 | /* |
2083 | * The atomic access to the futex value |
2084 | * generated a pagefault, so retry the |
2085 | * user-access and the wakeup: |
2086 | */ |
2087 | if (ret == -EFAULT) |
2088 | goto pi_faulted; |
2089 | goto out_unlock; |
2090 | } |
2091 | /* |
2092 | * No waiters - kernel unlocks the futex: |
2093 | */ |
2094 | if (!(uval & FUTEX_OWNER_DIED)) { |
2095 | ret = unlock_futex_pi(uaddr, uval); |
2096 | if (ret == -EFAULT) |
2097 | goto pi_faulted; |
2098 | } |
2099 | |
2100 | out_unlock: |
2101 | spin_unlock(&hb->lock); |
2102 | put_futex_key(fshared, &key); |
2103 | |
2104 | out: |
2105 | return ret; |
2106 | |
2107 | pi_faulted: |
2108 | spin_unlock(&hb->lock); |
2109 | put_futex_key(fshared, &key); |
2110 | |
2111 | ret = fault_in_user_writeable(uaddr); |
2112 | if (!ret) |
2113 | goto retry; |
2114 | |
2115 | return ret; |
2116 | } |
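
/*
 * Matching hedged sketch of the userspace unlock fast path that falls
 * back to futex_unlock_pi(), continuing the invented pi_lock_word
 * example above:
 *
 *	static void pi_unlock(void)
 *	{
 *		int tid = syscall(SYS_gettid);
 *		// TID -> 0 succeeds only while FUTEX_WAITERS is clear.
 *		if (__sync_val_compare_and_swap(&pi_lock_word, tid, 0) != tid)
 *			// Waiters queued: let the kernel wake the top
 *			// waiter and hand the rt_mutex over.
 *			syscall(SYS_futex, &pi_lock_word, FUTEX_UNLOCK_PI,
 *				0, NULL, NULL, 0);
 *	}
 */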
2117 | |
2118 | /** |
2119 | * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex |
2120 | * @hb: the hash_bucket futex_q was originally enqueued on |
2121 | * @q: the futex_q woken while waiting to be requeued |
2122 | * @key2: the futex_key of the requeue target futex |
2123 | * @timeout: the timeout associated with the wait (NULL if none) |
2124 | * |
2125 | * Detect if the task was woken on the initial futex as opposed to the requeue |
2126 | * target futex. If so, determine if it was a timeout or a signal that caused |
2127 | * the wakeup and return the appropriate error code to the caller. Must be |
2128 | * called with the hb lock held. |
2129 | * |
2130 | * Returns: |
2131 | * 0 - no early wakeup detected |
2132 | * <0 - -ETIMEDOUT or -ERESTARTNOINTR |
2133 | */ |
2134 | static inline |
2135 | int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, |
2136 | struct futex_q *q, union futex_key *key2, |
2137 | struct hrtimer_sleeper *timeout) |
2138 | { |
2139 | int ret = 0; |
2140 | |
2141 | /* |
2142 | * With the hb lock held, we avoid races while we process the wakeup. |
2143 | * We only need to hold hb (and not hb2) to ensure atomicity as the |
2144 | * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. |
2145 | * It can't be requeued from uaddr2 to something else since we don't |
2146 | * support a PI aware source futex for requeue. |
2147 | */ |
2148 | if (!match_futex(&q->key, key2)) { |
2149 | WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); |
2150 | /* |
2151 | * We were woken prior to requeue by a timeout or a signal. |
2152 | * Unqueue the futex_q and determine which it was. |
2153 | */ |
2154 | plist_del(&q->list, &q->list.plist); |
2155 | |
2156 | /* Handle spurious wakeups gracefully */ |
2157 | ret = -EWOULDBLOCK; |
2158 | if (timeout && !timeout->task) |
2159 | ret = -ETIMEDOUT; |
2160 | else if (signal_pending(current)) |
2161 | ret = -ERESTARTNOINTR; |
2162 | } |
2163 | return ret; |
2164 | } |
2165 | |
2166 | /** |
2167 | * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
2168 | * @uaddr: the futex we initially wait on (non-pi) |
2169 | * @fshared: whether the futexes are shared (1) or not (0). They must be |
2170 | * the same type, no requeueing from private to shared, etc. |
2171 | * @val: the expected value of uaddr |
2172 | * @abs_time: absolute timeout |
2173 | * @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
2174 | * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) |
2175 | * @uaddr2: the pi futex we will take prior to returning to user-space |
2176 | * |
2177 | * The caller will wait on uaddr and will be requeued by futex_requeue() to |
2178 | * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and |
2179 | * complete the acquisition of the rt_mutex prior to returning to userspace. |
2180 | * This ensures the rt_mutex maintains an owner when it has waiters; without |
2181 | * one, the pi logic wouldn't know which task to boost/deboost, if there was a |
2182 | * need to. |
2183 | * |
2184 | * We call schedule() in futex_wait_queue_me() when we enqueue and return there |
2185 | * via the following: |
2186 | * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() |
2187 | * 2) wakeup on uaddr2 after a requeue |
2188 | * 3) signal |
2189 | * 4) timeout |
2190 | * |
2191 | * If 3, cleanup and return -ERESTARTNOINTR. |
2192 | * |
2193 | * If 2, we may then block on trying to take the rt_mutex and return via: |
2194 | * 5) successful lock |
2195 | * 6) signal |
2196 | * 7) timeout |
2197 | * 8) other lock acquisition failure |
2198 | * |
2199 | * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). |
2200 | * |
2201 | * If 4 or 7, we cleanup and return with -ETIMEDOUT. |
2202 | * |
2203 | * Returns: |
2204 | * 0 - On success |
2205 | * <0 - On error |
2206 | */ |
2207 | static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, |
2208 | u32 val, ktime_t *abs_time, u32 bitset, |
2209 | int clockrt, u32 __user *uaddr2) |
2210 | { |
2211 | struct hrtimer_sleeper timeout, *to = NULL; |
2212 | struct rt_mutex_waiter rt_waiter; |
2213 | struct rt_mutex *pi_mutex = NULL; |
2214 | struct futex_hash_bucket *hb; |
2215 | union futex_key key2; |
2216 | struct futex_q q; |
2217 | int res, ret; |
2218 | |
2219 | if (!bitset) |
2220 | return -EINVAL; |
2221 | |
2222 | if (abs_time) { |
2223 | to = &timeout; |
2224 | hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : |
2225 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
2226 | hrtimer_init_sleeper(to, current); |
2227 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
2228 | current->timer_slack_ns); |
2229 | } |
2230 | |
2231 | /* |
2232 | * The waiter is allocated on our stack, manipulated by the requeue |
2233 | * code while we sleep on uaddr. |
2234 | */ |
2235 | debug_rt_mutex_init_waiter(&rt_waiter); |
2236 | rt_waiter.task = NULL; |
2237 | |
2238 | key2 = FUTEX_KEY_INIT; |
2239 | ret = get_futex_key(uaddr2, fshared, &key2); |
2240 | if (unlikely(ret != 0)) |
2241 | goto out; |
2242 | |
2243 | q.pi_state = NULL; |
2244 | q.bitset = bitset; |
2245 | q.rt_waiter = &rt_waiter; |
2246 | q.requeue_pi_key = &key2; |
2247 | |
2248 | /* Prepare to wait on uaddr. */ |
2249 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); |
2250 | if (ret) |
2251 | goto out_key2; |
2252 | |
2253 | /* Queue the futex_q, drop the hb lock, wait for wakeup. */ |
2254 | futex_wait_queue_me(hb, &q, to); |
2255 | |
2256 | spin_lock(&hb->lock); |
2257 | ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); |
2258 | spin_unlock(&hb->lock); |
2259 | if (ret) |
2260 | goto out_put_keys; |
2261 | |
2262 | /* |
2263 | * In order for us to be here, we know our q.key == key2, and since |
2264 | * we took the hb->lock above, we also know that futex_requeue() has |
2265 | * completed and we no longer have to concern ourselves with a wakeup |
2266 | * race with the atomic proxy lock acquisition by the requeue code. |
2267 | */ |
2268 | |
2269 | /* Check if the requeue code acquired the second futex for us. */ |
2270 | if (!q.rt_waiter) { |
2271 | /* |
2272 | * Got the lock. We might not be the anticipated owner if we |
2273 | * did a lock-steal - fix up the PI-state in that case. |
2274 | */ |
2275 | if (q.pi_state && (q.pi_state->owner != current)) { |
2276 | spin_lock(q.lock_ptr); |
2277 | ret = fixup_pi_state_owner(uaddr2, &q, current, |
2278 | fshared); |
2279 | spin_unlock(q.lock_ptr); |
2280 | } |
2281 | } else { |
2282 | /* |
2283 | * We have been woken up by futex_unlock_pi(), a timeout, or a |
2284 | * signal. futex_unlock_pi() will not destroy the lock_ptr nor |
2285 | * the pi_state. |
2286 | */ |
2287 | WARN_ON(!q.pi_state); |
2288 | pi_mutex = &q.pi_state->pi_mutex; |
2289 | ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); |
2290 | debug_rt_mutex_free_waiter(&rt_waiter); |
2291 | |
2292 | spin_lock(q.lock_ptr); |
2293 | /* |
2294 | * Fixup the pi_state owner and possibly acquire the lock if we |
2295 | * haven't already. |
2296 | */ |
2297 | res = fixup_owner(uaddr2, fshared, &q, !ret); |
2298 | /* |
2299 | * If fixup_owner() returned an error, propagate that. If it |
2300 | * acquired the lock, clear -ETIMEDOUT or -EINTR. |
2301 | */ |
2302 | if (res) |
2303 | ret = (res < 0) ? res : 0; |
2304 | |
2305 | /* Unqueue and drop the lock. */ |
2306 | unqueue_me_pi(&q); |
2307 | } |
2308 | |
2309 | /* |
2310 | * If fixup_pi_state_owner() faulted and was unable to handle the |
2311 | * fault, unlock the rt_mutex and return the fault to userspace. |
2312 | */ |
2313 | if (ret == -EFAULT) { |
2314 | if (rt_mutex_owner(pi_mutex) == current) |
2315 | rt_mutex_unlock(pi_mutex); |
2316 | } else if (ret == -EINTR) { |
2317 | /* |
2318 | * We've already been requeued, but cannot restart by calling |
2319 | * futex_lock_pi() directly. We could restart this syscall, but |
2320 | * it would detect that the user space "val" changed and return |
2321 | * -EWOULDBLOCK. Save the overhead of the restart and return |
2322 | * -EWOULDBLOCK directly. |
2323 | */ |
2324 | ret = -EWOULDBLOCK; |
2325 | } |
2326 | |
2327 | out_put_keys: |
2328 | put_futex_key(fshared, &q.key); |
2329 | out_key2: |
2330 | put_futex_key(fshared, &key2); |
2331 | |
2332 | out: |
2333 | if (to) { |
2334 | hrtimer_cancel(&to->timer); |
2335 | destroy_hrtimer_on_stack(&to->timer); |
2336 | } |
2337 | return ret; |
2338 | } |
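
/*
 * The expected user of the two requeue-PI operations is a condvar
 * built over a PI mutex. A hedged sketch of the calling pattern; cond,
 * mutex and seq are invented names:
 *
 *	// Wait on &cond->seq; futex_requeue() later moves the waiter
 *	// onto the PI futex &mutex->word:
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI,
 *		seq, abs_timeout, &mutex->word, 0);
 *
 *	// Broadcast: wake one waiter and requeue the rest onto the
 *	// mutex (nr_requeue rides in the timeout slot, val3 carries
 *	// the expected seq value):
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI,
 *		1, (void *)(long)INT_MAX, &mutex->word, seq);
 */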
2339 | |
2340 | /* |
2341 | * Support for robust futexes: the kernel cleans up held futexes at |
2342 | * thread exit time. |
2343 | * |
2344 | * Implementation: user-space maintains a per-thread list of locks it |
2345 | * is holding. Upon do_exit(), the kernel carefully walks this list, |
2346 | * and marks all locks that are owned by this thread with the |
2347 | * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is |
2348 | * always manipulated with the lock held, so the list is private and |
2349 | * per-thread. Userspace also maintains a per-thread 'list_op_pending' |
2350 | * field, to allow the kernel to clean up if the thread dies after |
2351 | * acquiring the lock, but just before it could have added itself to |
2352 | * the list. There can only be one such pending lock. |
2353 | */ |
2354 | |
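/*
 * Hedged sketch of how userspace might lay out and register the list;
 * the initializer style and futex_offset value are assumptions for the
 * example, the syscall is sys_set_robust_list() below:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list }, // empty, circular
 *		.futex_offset	 = 0,	// list entry -> futex word offset
 *		.list_op_pending = NULL,
 *	};
 *
 *	static void register_robust(void)
 *	{
 *		syscall(SYS_set_robust_list, &head, sizeof(head));
 *	}
 */
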
2355 | /** |
2356 | * sys_set_robust_list() - Set the robust-futex list head of a task |
2357 | * @head: pointer to the list-head |
2358 | * @len: length of the list-head, as userspace expects |
2359 | */ |
2360 | SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, |
2361 | size_t, len) |
2362 | { |
2363 | if (!futex_cmpxchg_enabled) |
2364 | return -ENOSYS; |
2365 | /* |
2366 | * The kernel knows only one size for now: |
2367 | */ |
2368 | if (unlikely(len != sizeof(*head))) |
2369 | return -EINVAL; |
2370 | |
2371 | current->robust_list = head; |
2372 | |
2373 | return 0; |
2374 | } |
2375 | |
2376 | /** |
2377 | * sys_get_robust_list() - Get the robust-futex list head of a task |
2378 | * @pid: pid of the process [zero for current task] |
2379 | * @head_ptr: pointer to a list-head pointer, the kernel fills it in |
2380 | * @len_ptr: pointer to a length field, the kernel fills in the header size |
2381 | */ |
2382 | SYSCALL_DEFINE3(get_robust_list, int, pid, |
2383 | struct robust_list_head __user * __user *, head_ptr, |
2384 | size_t __user *, len_ptr) |
2385 | { |
2386 | struct robust_list_head __user *head; |
2387 | unsigned long ret; |
2388 | const struct cred *cred = current_cred(), *pcred; |
2389 | |
2390 | if (!futex_cmpxchg_enabled) |
2391 | return -ENOSYS; |
2392 | |
2393 | if (!pid) |
2394 | head = current->robust_list; |
2395 | else { |
2396 | struct task_struct *p; |
2397 | |
2398 | ret = -ESRCH; |
2399 | rcu_read_lock(); |
2400 | p = find_task_by_vpid(pid); |
2401 | if (!p) |
2402 | goto err_unlock; |
2403 | ret = -EPERM; |
2404 | pcred = __task_cred(p); |
2405 | if (cred->euid != pcred->euid && |
2406 | cred->euid != pcred->uid && |
2407 | !capable(CAP_SYS_PTRACE)) |
2408 | goto err_unlock; |
2409 | head = p->robust_list; |
2410 | rcu_read_unlock(); |
2411 | } |
2412 | |
2413 | if (put_user(sizeof(*head), len_ptr)) |
2414 | return -EFAULT; |
2415 | return put_user(head, head_ptr); |
2416 | |
2417 | err_unlock: |
2418 | rcu_read_unlock(); |
2419 | |
2420 | return ret; |
2421 | } |
2422 | |
2423 | /* |
2424 | * Process a futex-list entry, check whether it's owned by the |
2425 | * dying task, and do notification if so: |
2426 | */ |
2427 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
2428 | { |
2429 | u32 uval, nval, mval; |
2430 | |
2431 | retry: |
2432 | if (get_user(uval, uaddr)) |
2433 | return -1; |
2434 | |
2435 | if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { |
2436 | /* |
2437 | * Ok, this dying thread is truly holding a futex |
2438 | * of interest. Set the OWNER_DIED bit atomically |
2439 | * via cmpxchg, and if the value had FUTEX_WAITERS |
2440 | * set, wake up a waiter (if any). (We have to do a |
2441 | * futex_wake() even if OWNER_DIED is already set - |
2442 | * to handle the rare but possible case of recursive |
2443 | * thread-death.) The rest of the cleanup is done in |
2444 | * userspace. |
2445 | */ |
2446 | mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; |
2447 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); |
2448 | |
2449 | if (nval == -EFAULT) |
2450 | return -1; |
2451 | |
2452 | if (nval != uval) |
2453 | goto retry; |
2454 | |
2455 | /* |
2456 | * Wake robust non-PI futexes here. The wakeup of |
2457 | * PI futexes happens in exit_pi_state(): |
2458 | */ |
2459 | if (!pi && (uval & FUTEX_WAITERS)) |
2460 | futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
2461 | } |
2462 | return 0; |
2463 | } |
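
/*
 * After the cleanup above the futex word holds FUTEX_OWNER_DIED plus a
 * possible FUTEX_WAITERS bit, with the dead owner's TID cleared. A
 * hedged sketch of how a later locker might observe that; lock_word
 * and tid are invented names:
 *
 *	int val = lock_word;
 *	if (val & FUTEX_OWNER_DIED)
 *		// Claim the lock but keep the flag set, so the caller
 *		// knows owner-died recovery of the protected data is
 *		// still required:
 *		__sync_val_compare_and_swap(&lock_word, val,
 *					    tid | FUTEX_OWNER_DIED);
 */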
2464 | |
2465 | /* |
2466 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
2467 | */ |
2468 | static inline int fetch_robust_entry(struct robust_list __user **entry, |
2469 | struct robust_list __user * __user *head, |
2470 | int *pi) |
2471 | { |
2472 | unsigned long uentry; |
2473 | |
2474 | if (get_user(uentry, (unsigned long __user *)head)) |
2475 | return -EFAULT; |
2476 | |
2477 | *entry = (void __user *)(uentry & ~1UL); |
2478 | *pi = uentry & 1; |
2479 | |
2480 | return 0; |
2481 | } |
2482 | |
2483 | /* |
2484 | * Walk curr->robust_list (very carefully, it's a userspace list!) |
2485 | * and mark any locks found there dead, and notify any waiters. |
2486 | * |
2487 | * We silently return on any sign of list-walking problem. |
2488 | */ |
2489 | void exit_robust_list(struct task_struct *curr) |
2490 | { |
2491 | struct robust_list_head __user *head = curr->robust_list; |
2492 | struct robust_list __user *entry, *next_entry, *pending; |
2493 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; |
2494 | unsigned long futex_offset; |
2495 | int rc; |
2496 | |
2497 | if (!futex_cmpxchg_enabled) |
2498 | return; |
2499 | |
2500 | /* |
2501 | * Fetch the list head (which was registered earlier, via |
2502 | * sys_set_robust_list()): |
2503 | */ |
2504 | if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
2505 | return; |
2506 | /* |
2507 | * Fetch the relative futex offset: |
2508 | */ |
2509 | if (get_user(futex_offset, &head->futex_offset)) |
2510 | return; |
2511 | /* |
2512 | * Fetch any possibly pending lock-add first, and handle it |
2513 | * if it exists: |
2514 | */ |
2515 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
2516 | return; |
2517 | |
2518 | next_entry = NULL; /* avoid warning with gcc */ |
2519 | while (entry != &head->list) { |
2520 | /* |
2521 | * Fetch the next entry in the list before calling |
2522 | * handle_futex_death: |
2523 | */ |
2524 | rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); |
2525 | /* |
2526 | * A pending lock might already be on the list, so |
2527 | * don't process it twice: |
2528 | */ |
2529 | if (entry != pending) |
2530 | if (handle_futex_death((void __user *)entry + futex_offset, |
2531 | curr, pi)) |
2532 | return; |
2533 | if (rc) |
2534 | return; |
2535 | entry = next_entry; |
2536 | pi = next_pi; |
2537 | /* |
2538 | * Avoid excessively long or circular lists: |
2539 | */ |
2540 | if (!--limit) |
2541 | break; |
2542 | |
2543 | cond_resched(); |
2544 | } |
2545 | |
2546 | if (pending) |
2547 | handle_futex_death((void __user *)pending + futex_offset, |
2548 | curr, pip); |
2549 | } |
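
/*
 * Userspace counterpart of the list_op_pending window handled above,
 * as a hedged sketch; acquire() and enqueue() are invented helpers:
 *
 *	head.list_op_pending = &lock->list;	// announce the intent
 *	acquire(lock);				// the 0 -> TID transition
 *	enqueue(&head.list, &lock->list);	// publish on the list
 *	head.list_op_pending = NULL;		// acquisition is visible
 */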
2550 | |
2551 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
2552 | u32 __user *uaddr2, u32 val2, u32 val3) |
2553 | { |
2554 | int clockrt, ret = -ENOSYS; |
2555 | int cmd = op & FUTEX_CMD_MASK; |
2556 | int fshared = 0; |
2557 | |
2558 | if (!(op & FUTEX_PRIVATE_FLAG)) |
2559 | fshared = 1; |
2560 | |
2561 | clockrt = op & FUTEX_CLOCK_REALTIME; |
2562 | if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) |
2563 | return -ENOSYS; |
2564 | |
2565 | switch (cmd) { |
2566 | case FUTEX_WAIT: |
2567 | val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */ |
2568 | case FUTEX_WAIT_BITSET: |
2569 | ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt); |
2570 | break; |
2571 | case FUTEX_WAKE: |
2572 | val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */ |
2573 | case FUTEX_WAKE_BITSET: |
2574 | ret = futex_wake(uaddr, fshared, val, val3); |
2575 | break; |
2576 | case FUTEX_REQUEUE: |
2577 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0); |
2578 | break; |
2579 | case FUTEX_CMP_REQUEUE: |
2580 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3, |
2581 | 0); |
2582 | break; |
2583 | case FUTEX_WAKE_OP: |
2584 | ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); |
2585 | break; |
2586 | case FUTEX_LOCK_PI: |
2587 | if (futex_cmpxchg_enabled) |
2588 | ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); |
2589 | break; |
2590 | case FUTEX_UNLOCK_PI: |
2591 | if (futex_cmpxchg_enabled) |
2592 | ret = futex_unlock_pi(uaddr, fshared); |
2593 | break; |
2594 | case FUTEX_TRYLOCK_PI: |
2595 | if (futex_cmpxchg_enabled) |
2596 | ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); |
2597 | break; |
2598 | case FUTEX_WAIT_REQUEUE_PI: |
2599 | val3 = FUTEX_BITSET_MATCH_ANY; |
2600 | ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3, |
2601 | clockrt, uaddr2); |
2602 | break; |
2603 | case FUTEX_CMP_REQUEUE_PI: |
2604 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3, |
2605 | 1); |
2606 | break; |
2607 | default: |
2608 | ret = -ENOSYS; |
2609 | } |
2610 | return ret; |
2611 | } |
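
/*
 * Worked examples of the op decoding above, using the flag values from
 * <linux/futex.h>:
 *
 *	op = FUTEX_WAIT | FUTEX_PRIVATE_FLAG
 *		-> cmd = FUTEX_WAIT, fshared = 0, clockrt = 0
 *	op = FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
 *		-> cmd = FUTEX_WAIT_BITSET, fshared = 1, clockrt = 1
 *	op = FUTEX_WAIT | FUTEX_CLOCK_REALTIME
 *		-> -ENOSYS (clockrt is only valid for the bitset and
 *		   requeue-pi waits)
 */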
2612 | |
2613 | |
2614 | SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
2615 | struct timespec __user *, utime, u32 __user *, uaddr2, |
2616 | u32, val3) |
2617 | { |
2618 | struct timespec ts; |
2619 | ktime_t t, *tp = NULL; |
2620 | u32 val2 = 0; |
2621 | int cmd = op & FUTEX_CMD_MASK; |
2622 | |
2623 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
2624 | cmd == FUTEX_WAIT_BITSET || |
2625 | cmd == FUTEX_WAIT_REQUEUE_PI)) { |
2626 | if (copy_from_user(&ts, utime, sizeof(ts)) != 0) |
2627 | return -EFAULT; |
2628 | if (!timespec_valid(&ts)) |
2629 | return -EINVAL; |
2630 | |
2631 | t = timespec_to_ktime(ts); |
2632 | if (cmd == FUTEX_WAIT) |
2633 | t = ktime_add_safe(ktime_get(), t); |
2634 | tp = &t; |
2635 | } |
2636 | /* |
2637 | * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*. |
2638 | * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. |
2639 | */ |
2640 | if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
2641 | cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) |
2642 | val2 = (u32) (unsigned long) utime; |
2643 | |
2644 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
2645 | } |
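
/*
 * Note the timeout convention implemented above: FUTEX_WAIT takes a
 * relative timeout and is converted to an absolute one here, while
 * FUTEX_WAIT_BITSET and FUTEX_WAIT_REQUEUE_PI pass utime through as
 * already absolute. A hedged caller sketch:
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	// Block for at most 1s (relative) while *uaddr == val:
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT, val, &ts, NULL, 0);
 */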
2646 | |
2647 | static int __init futex_init(void) |
2648 | { |
2649 | u32 curval; |
2650 | int i; |
2651 | |
2652 | /* |
2653 | * This will fail, and we want it to. Some arch implementations do |
2654 | * runtime detection of the futex_atomic_cmpxchg_inatomic() |
2655 | * functionality. We want to know that before we call in any |
2656 | * of the complex code paths. Also we want to prevent |
2657 | * registration of robust lists in that case. NULL is |
2658 | * guaranteed to fault and we get -EFAULT on a functional |
2659 | * implementation, while the non-functional ones will return |
2660 | * -ENOSYS. |
2661 | */ |
2662 | curval = cmpxchg_futex_value_locked(NULL, 0, 0); |
2663 | if (curval == -EFAULT) |
2664 | futex_cmpxchg_enabled = 1; |
2665 | |
2666 | for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { |
2667 | plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); |
2668 | spin_lock_init(&futex_queues[i].lock); |
2669 | } |
2670 | |
2671 | return 0; |
2672 | } |
2673 | __initcall(futex_init); |
2674 |