/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)
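
/*
 * Worked example (editor's sketch): a non-"nice", non-realtime task
 * selecting with a 2 second timeout gets divfactor = 1000, so the slack
 * below works out to 2 * (NSEC_PER_SEC / 1000) = 2,000,000 ns (2 ms),
 * well under the 100 ms MAX_SLACK cap. A "nice" task (divfactor = 200)
 * would get 10 ms for the same timeout.
 */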

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
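
/*
 * For context, a typical driver-side user of poll_wait() looks roughly
 * like the sketch below (editor's illustration; foo_dev, foo_poll and
 * data_ready are hypothetical, not part of this file):
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->waitq, wait);
 *		if (data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 * do_select() and do_poll() below invoke such ->poll methods with the
 * poll_table set up by poll_initwait(), so poll_wait() ends up in
 * __pollwait() here.
 */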
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to: pointer to timespec variable for the final timeout
 * @sec: seconds (from user space)
 * @nsec: nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
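
/*
 * Usage sketch (editor's note): poll_select_set_timeout(&end_time, 5, 0)
 * stores "now + 5 s" in end_time as an absolute expiry, while sec = 0,
 * nsec = 0 leaves end_time zeroed, which the callers below treat as
 * "poll once and return". A denormalized pair, such as nsec >=
 * NSEC_PER_SEC, is rejected with -EINVAL by timespec_valid().
 */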

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle the last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
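
/*
 * Editor's note: these sets encode how select()'s three fd_sets map onto
 * poll events. POLLHUP and POLLERR are folded into "readable", and POLLERR
 * into "writable", so a hung-up or errored descriptor makes select()
 * return instead of blocking forever; only POLLPRI counts as "exceptional".
 */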

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	wait->_key = POLLEX_SET;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(file, wait);
					}
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
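	/*
	 * Worked example (editor's sketch, 64-bit, assuming the usual
	 * SELECT_STACK_ALLOC of 256): n = 100 gives FDS_BYTES(n) = 16
	 * (100 bits rounded up to two longs), so all six bitmaps need
	 * 6 * 16 = 96 bytes and fit in stack_fds; only larger n falls
	 * back to the kmalloc() below.
	 */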
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
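
/*
 * Illustrative userspace view of that sixth argument (editor's sketch;
 * this struct is not declared in this file):
 *
 *	struct {
 *		const sigset_t *ss;	// pointer to the signal mask
 *		size_t ss_len;		// size of the mask, in bytes
 *	};
 *
 * pselect6() below reads the two fields back out with __get_user().
 */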
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				  (size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
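
/*
 * Worked example (editor's note): on a 64-bit arch with 4 KiB pages,
 * sizeof(struct poll_list) is 16 and sizeof(struct pollfd) is 8, so
 * POLLFD_PER_PAGE is (4096 - 16) / 8 = 510 entries per chunk.
 */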

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				mask = file->f_op->poll(file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt->_qproc = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))
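
/*
 * Editor's note: with the usual POLL_STACK_ALLOC of 256 this works out to
 * (256 - 16) / 8 = 30 pollfds in the on-stack chunk (64-bit sizes, as in
 * the POLLFD_PER_PAGE example above); larger nfds spill into page-sized
 * kmalloc'd chunks.
 */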

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
				   sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}
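
	/*
	 * Example (editor's note): timeout_msecs = 2500 becomes an absolute
	 * expiry 2 s + 500 ms from now; a negative timeout leaves to == NULL,
	 * meaning "block indefinitely".
	 */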

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}