kernel/posix-timers.c

1/*
2 * linux/kernel/posix-timers.c
3 *
4 *
5 * 2002-10-15 Posix Clocks & timers
6 * by George Anzinger george@mvista.com
7 *
8 * Copyright (C) 2002 2003 by MontaVista Software.
9 *
10 * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
11 * Copyright (C) 2004 Boris Hu
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or (at
16 * your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
28 */
29
30/* These are all the functions necessary to implement
31 * POSIX clocks & timers
32 */
33#include <linux/mm.h>
34#include <linux/interrupt.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/mutex.h>
38
39#include <asm/uaccess.h>
40#include <linux/list.h>
41#include <linux/init.h>
42#include <linux/compiler.h>
43#include <linux/idr.h>
44#include <linux/posix-timers.h>
45#include <linux/syscalls.h>
46#include <linux/wait.h>
47#include <linux/workqueue.h>
48#include <linux/module.h>
49
50/*
51 * Management arrays for POSIX timers. Timers are kept in slab memory.
52 * Timer ids are allocated by an external routine that keeps track of the
53 * id and the timer. The external interface is:
54 *
55 * void *idr_find(struct idr *idp, int id); to find timer_id <id>
56 * int idr_get_new(struct idr *idp, void *ptr, int *id); to get a new id
57 * and relate it to <ptr>
58 * void idr_remove(struct idr *idp, int id); to release <id>
59 * void idr_init(struct idr *idp); to initialize <idp>
60 * all of which we supply.
61 * idr_pre_get *may* call slab for more memory so it must not be called
62 * under a spin lock; idr_get_new can then run with the lock held. Likewise
63 * idr_remove may release memory (but it may be ok to do this under a lock...).
64 * idr_find is just a memory lookup and is quite fast. A NULL return
65 * indicates that the requested id does not exist.
66 */
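
/*
 * Illustrative sketch (not part of the original source): the id allocation
 * pattern used below in sys_timer_create() reserves memory with
 * idr_pre_get() outside the lock, then takes the idr spinlock for the
 * actual insertion:
 *
 *	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL)))
 *		return -EAGAIN;
 *	spin_lock_irq(&idr_lock);
 *	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 *	spin_unlock_irq(&idr_lock);
 *	if (error == -EAGAIN)
 *		goto retry;	(start again from idr_pre_get)
 */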
67
68/*
69 * Let's keep our timers in a slab cache :-)
70 */
71static struct kmem_cache *posix_timers_cache;
72static struct idr posix_timers_id;
73static DEFINE_SPINLOCK(idr_lock);
74
75/*
76 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
77 * SIGEV values. Here we emit a compile-time error if this assumption fails.
78 */
79#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
80                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
81#error "SIGEV_THREAD_ID must not share bits with other SIGEV values!"
82#endif
83
84
85/*
86 * The timer ID is turned into a timer address by idr_find().
87 * Verifying a valid ID consists of:
88 *
89 * a) checking that idr_find() returns a non-NULL pointer.
90 * b) checking that the timer id matches the one in the timer itself.
91 * c) checking that the timer owner is in the caller's thread group.
92 */
93
94/*
95 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
96 * to implement others. This structure defines the various
97 * clocks and allows the possibility of adding others. We
98 * provide an interface to add clocks to the table and expect
99 * the "arch" code to add at least one clock that is high
100 * resolution. Here we define the standard CLOCK_REALTIME as a
101 * 1/HZ resolution clock.
102 *
103 * RESOLUTION: Clock resolution is used to round up timer and interval
104 * times, NOT to report clock times, which are reported with as
105 * much resolution as the system can muster. In some cases this
106 * resolution may depend on the underlying clock hardware and
107 * may not be quantifiable until run time, and only then can the
108 * necessary code be written. The standard says we should say
109 * something about this issue in the documentation...
110 *
111 * FUNCTIONS: The CLOCKs structure defines possible functions to handle
112 * various clock functions. For clocks that use the standard
113 * system timer code these entries should be NULL. This will
114 * allow dispatch without the overhead of indirect function
115 * calls. CLOCKS that depend on other sources (e.g. WWV or GPS)
116 * must supply functions here, even if the function just returns
117 * ENOSYS. The standard POSIX timer management code assumes the
118 * following: 1.) The k_itimer struct (sched.h) is used for the
119 * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
120 * fields are not modified by timer code.
121 *
122 * At this time all functions, including clock_nanosleep, can be
123 * redirected by the CLOCKS structure; sys_clock_nanosleep() below
124 * dispatches through the nsleep entry.
125 *
126 * Permissions: It is assumed that the clock_settime() function defined
127 * for each clock will take care of permission checks. Some
128 * clocks may be settable by any user (i.e. local process
129 * clocks); others may not. Currently the only settable clock we
130 * have is CLOCK_REALTIME and its high-res counterpart, both of
131 * which we beg off on and pass to do_sys_settimeofday().
132 */
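
/*
 * Registration sketch (hypothetical clock, for illustration only): an
 * "arch" clock fills in a struct k_clock and hands it to
 * register_posix_clock(); NULL hooks fall back to the common_* defaults.
 * The names CLOCK_ARCH, arch_clock_get and arch_clock_getres below are
 * made up for the example.
 *
 *	static struct k_clock clock_arch = {
 *		.clock_getres	= arch_clock_getres,
 *		.clock_get	= arch_clock_get,
 *		.clock_set	= do_posix_clock_nosettime,
 *	};
 *
 *	register_posix_clock(CLOCK_ARCH, &clock_arch);
 */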
133
134static struct k_clock posix_clocks[MAX_CLOCKS];
135
136/*
137 * These are defined below.
138 */
139static int common_nsleep(const clockid_t, int flags, struct timespec *t,
140             struct timespec __user *rmtp);
141static void common_timer_get(struct k_itimer *, struct itimerspec *);
142static int common_timer_set(struct k_itimer *, int,
143                struct itimerspec *, struct itimerspec *);
144static int common_timer_del(struct k_itimer *timer);
145
146static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
147
148static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
149
150static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
151{
152    spin_unlock_irqrestore(&timr->it_lock, flags);
153}
154
155/*
156 * Call the k_clock hook function if non-null, or the default function.
157 */
158#define CLOCK_DISPATCH(clock, call, arglist) \
159     ((clock) < 0 ? posix_cpu_##call arglist : \
160      (posix_clocks[clock].call != NULL \
161       ? (*posix_clocks[clock].call) arglist : common_##call arglist))
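
/*
 * For example, CLOCK_DISPATCH(which_clock, clock_get, (which_clock, &ts))
 * resolves to posix_cpu_clock_get(which_clock, &ts) for a negative (CPU)
 * clock id, to posix_clocks[which_clock].clock_get(which_clock, &ts) when
 * that hook is non-NULL, and to common_clock_get(which_clock, &ts)
 * otherwise.
 */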
162
163/*
164 * Default clock hook functions when the struct k_clock passed
165 * to register_posix_clock leaves a function pointer null.
166 *
167 * The function common_CALL is the default implementation for
168 * the function pointer CALL in struct k_clock.
169 */
170
171static inline int common_clock_getres(const clockid_t which_clock,
172                      struct timespec *tp)
173{
174    tp->tv_sec = 0;
175    tp->tv_nsec = posix_clocks[which_clock].res;
176    return 0;
177}
178
179/*
180 * Get real time for posix timers
181 */
182static int common_clock_get(clockid_t which_clock, struct timespec *tp)
183{
184    ktime_get_real_ts(tp);
185    return 0;
186}
187
188static inline int common_clock_set(const clockid_t which_clock,
189                   struct timespec *tp)
190{
191    return do_sys_settimeofday(tp, NULL);
192}
193
194static int common_timer_create(struct k_itimer *new_timer)
195{
196    hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
197    return 0;
198}
199
200static int no_timer_create(struct k_itimer *new_timer)
201{
202    return -EOPNOTSUPP;
203}
204
205static int no_nsleep(const clockid_t which_clock, int flags,
206             struct timespec *tsave, struct timespec __user *rmtp)
207{
208    return -EOPNOTSUPP;
209}
210
211/*
212 * Return nonzero if we know a priori this clockid_t value is bogus.
213 */
214static inline int invalid_clockid(const clockid_t which_clock)
215{
216    if (which_clock < 0) /* CPU clock, posix_cpu_* will check it */
217        return 0;
218    if ((unsigned) which_clock >= MAX_CLOCKS)
219        return 1;
220    if (posix_clocks[which_clock].clock_getres != NULL)
221        return 0;
222    if (posix_clocks[which_clock].res != 0)
223        return 0;
224    return 1;
225}
226
227/*
228 * Get monotonic time for posix timers
229 */
230static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
231{
232    ktime_get_ts(tp);
233    return 0;
234}
235
236/*
237 * Get raw monotonic time for posix timers
238 */
239static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
240{
241    getrawmonotonic(tp);
242    return 0;
243}
244
245
246static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
247{
248    *tp = current_kernel_time();
249    return 0;
250}
251
252static int posix_get_monotonic_coarse(clockid_t which_clock,
253                        struct timespec *tp)
254{
255    *tp = get_monotonic_coarse();
256    return 0;
257}
258
259static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
260{
261    *tp = ktime_to_timespec(KTIME_LOW_RES);
262    return 0;
263}
264/*
265 * Initialize everything, well, just everything in Posix clocks/timers ;)
266 */
267static __init int init_posix_timers(void)
268{
269    struct k_clock clock_realtime = {
270        .clock_getres = hrtimer_get_res,
271    };
272    struct k_clock clock_monotonic = {
273        .clock_getres = hrtimer_get_res,
274        .clock_get = posix_ktime_get_ts,
275        .clock_set = do_posix_clock_nosettime,
276    };
277    struct k_clock clock_monotonic_raw = {
278        .clock_getres = hrtimer_get_res,
279        .clock_get = posix_get_monotonic_raw,
280        .clock_set = do_posix_clock_nosettime,
281        .timer_create = no_timer_create,
282        .nsleep = no_nsleep,
283    };
284    struct k_clock clock_realtime_coarse = {
285        .clock_getres = posix_get_coarse_res,
286        .clock_get = posix_get_realtime_coarse,
287        .clock_set = do_posix_clock_nosettime,
288        .timer_create = no_timer_create,
289        .nsleep = no_nsleep,
290    };
291    struct k_clock clock_monotonic_coarse = {
292        .clock_getres = posix_get_coarse_res,
293        .clock_get = posix_get_monotonic_coarse,
294        .clock_set = do_posix_clock_nosettime,
295        .timer_create = no_timer_create,
296        .nsleep = no_nsleep,
297    };
298
299    register_posix_clock(CLOCK_REALTIME, &clock_realtime);
300    register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
301    register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
302    register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
303    register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
304
305    posix_timers_cache = kmem_cache_create("posix_timers_cache",
306                    sizeof (struct k_itimer), 0, SLAB_PANIC,
307                    NULL);
308    idr_init(&posix_timers_id);
309    return 0;
310}
311
312__initcall(init_posix_timers);
313
314static void schedule_next_timer(struct k_itimer *timr)
315{
316    struct hrtimer *timer = &timr->it.real.timer;
317
318    if (timr->it.real.interval.tv64 == 0)
319        return;
320
321    timr->it_overrun += (unsigned int) hrtimer_forward(timer,
322                        timer->base->get_time(),
323                        timr->it.real.interval);
324
325    timr->it_overrun_last = timr->it_overrun;
326    timr->it_overrun = -1;
327    ++timr->it_requeue_pending;
328    hrtimer_restart(timer);
329}
330
331/*
332 * This function is exported for use by the signal delivery code. It is
333 * called just prior to the info block being released and passes that
334 * block to us. Its function is to update the overrun entry AND to
335 * restart the timer. It should only be called if the timer is to be
336 * restarted (i.e. we have flagged this in the sys_private entry of the
337 * info block).
338 *
339 * To protect against the timer going away while the interrupt is queued,
340 * we require that the it_requeue_pending flag be set.
341 */
342void do_schedule_next_timer(struct siginfo *info)
343{
344    struct k_itimer *timr;
345    unsigned long flags;
346
347    timr = lock_timer(info->si_tid, &flags);
348
349    if (timr && timr->it_requeue_pending == info->si_sys_private) {
350        if (timr->it_clock < 0)
351            posix_cpu_timer_schedule(timr);
352        else
353            schedule_next_timer(timr);
354
355        info->si_overrun += timr->it_overrun_last;
356    }
357
358    if (timr)
359        unlock_timer(timr, flags);
360}
361
362int posix_timer_event(struct k_itimer *timr, int si_private)
363{
364    struct task_struct *task;
365    int shared, ret = -1;
366    /*
367     * FIXME: if ->sigq is queued we can race with
368     * dequeue_signal()->do_schedule_next_timer().
369     *
370     * If dequeue_signal() sees the "right" value of
371     * si_sys_private it calls do_schedule_next_timer().
372     * We re-queue ->sigq and drop ->it_lock().
373     * do_schedule_next_timer() locks the timer
374     * and re-schedules it while ->sigq is pending.
375 * Not really bad, but not what we want.
376     */
377    timr->sigq->info.si_sys_private = si_private;
378
379    rcu_read_lock();
380    task = pid_task(timr->it_pid, PIDTYPE_PID);
381    if (task) {
382        shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
383        ret = send_sigqueue(timr->sigq, task, shared);
384    }
385    rcu_read_unlock();
386    /* If we failed to send the signal the timer stops. */
387    return ret > 0;
388}
389EXPORT_SYMBOL_GPL(posix_timer_event);
390
391/*
392 * This function gets called when a POSIX.1b interval timer expires. It
393 * is used as a callback from the kernel internal timer. The
394 * run_timer_list code ALWAYS calls with interrupts on.
395
396 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
397 */
398static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
399{
400    struct k_itimer *timr;
401    unsigned long flags;
402    int si_private = 0;
403    enum hrtimer_restart ret = HRTIMER_NORESTART;
404
405    timr = container_of(timer, struct k_itimer, it.real.timer);
406    spin_lock_irqsave(&timr->it_lock, flags);
407
408    if (timr->it.real.interval.tv64 != 0)
409        si_private = ++timr->it_requeue_pending;
410
411    if (posix_timer_event(timr, si_private)) {
412        /*
413         * The signal was not sent because it is ignored (SIG_IGN):
414         * we will not get a callback to restart the timer, AND
415         * it should be restarted.
416         */
417        if (timr->it.real.interval.tv64 != 0) {
418            ktime_t now = hrtimer_cb_get_time(timer);
419
420            /*
421             * FIXME: What we really want, is to stop this
422             * timer completely and restart it in case the
423             * SIG_IGN is removed. This is a non trivial
424             * change which involves sighand locking
425             * (sigh !), which we don't want to do late in
426             * the release cycle.
427             *
428             * For now we just let timers with an interval
429             * less than a jiffie expire every jiffie to
430             * avoid softirq starvation in case of SIG_IGN
431             * and a very small interval, which would put
432             * the timer right back on the softirq pending
433             * list. By moving now ahead of time we trick
434             * hrtimer_forward() to expire the timer
435             * later, while we still maintain the overrun
436             * accuracy, but have some inconsistency in
437             * the timer_gettime() case. This is at least
438             * better than a starved softirq. A more
439             * complex fix which solves also another related
440             * inconsistency is already in the pipeline.
441             */
442#ifdef CONFIG_HIGH_RES_TIMERS
443            {
444                ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
445
446                if (timr->it.real.interval.tv64 < kj.tv64)
447                    now = ktime_add(now, kj);
448            }
449#endif
450            timr->it_overrun += (unsigned int)
451                hrtimer_forward(timer, now,
452                        timr->it.real.interval);
453            ret = HRTIMER_RESTART;
454            ++timr->it_requeue_pending;
455        }
456    }
457
458    unlock_timer(timr, flags);
459    return ret;
460}
461
462static struct pid *good_sigevent(sigevent_t * event)
463{
464    struct task_struct *rtn = current->group_leader;
465
466    if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
467        (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
468         !same_thread_group(rtn, current) ||
469         (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
470        return NULL;
471
472    if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
473        ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
474        return NULL;
475
476    return task_pid(rtn);
477}
478
479void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
480{
481    if ((unsigned) clock_id >= MAX_CLOCKS) {
482        printk("POSIX clock register failed for clock_id %d\n",
483               clock_id);
484        return;
485    }
486
487    posix_clocks[clock_id] = *new_clock;
488}
489EXPORT_SYMBOL_GPL(register_posix_clock);
490
491static struct k_itimer * alloc_posix_timer(void)
492{
493    struct k_itimer *tmr;
494    tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
495    if (!tmr)
496        return tmr;
497    if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
498        kmem_cache_free(posix_timers_cache, tmr);
499        return NULL;
500    }
501    memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
502    return tmr;
503}
504
505#define IT_ID_SET 1
506#define IT_ID_NOT_SET 0
507static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
508{
509    if (it_id_set) {
510        unsigned long flags;
511        spin_lock_irqsave(&idr_lock, flags);
512        idr_remove(&posix_timers_id, tmr->it_id);
513        spin_unlock_irqrestore(&idr_lock, flags);
514    }
515    put_pid(tmr->it_pid);
516    sigqueue_free(tmr->sigq);
517    kmem_cache_free(posix_timers_cache, tmr);
518}
519
520/* Create a POSIX.1b interval timer. */
521
522SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
523        struct sigevent __user *, timer_event_spec,
524        timer_t __user *, created_timer_id)
525{
526    struct k_itimer *new_timer;
527    int error, new_timer_id;
528    sigevent_t event;
529    int it_id_set = IT_ID_NOT_SET;
530
531    if (invalid_clockid(which_clock))
532        return -EINVAL;
533
534    new_timer = alloc_posix_timer();
535    if (unlikely(!new_timer))
536        return -EAGAIN;
537
538    spin_lock_init(&new_timer->it_lock);
539 retry:
540    if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
541        error = -EAGAIN;
542        goto out;
543    }
544    spin_lock_irq(&idr_lock);
545    error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
546    spin_unlock_irq(&idr_lock);
547    if (error) {
548        if (error == -EAGAIN)
549            goto retry;
550        /*
551         * Weird looking, but we return EAGAIN if the IDR is
552         * full (proper POSIX return value for this)
553         */
554        error = -EAGAIN;
555        goto out;
556    }
557
558    it_id_set = IT_ID_SET;
559    new_timer->it_id = (timer_t) new_timer_id;
560    new_timer->it_clock = which_clock;
561    new_timer->it_overrun = -1;
562
563    if (timer_event_spec) {
564        if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
565            error = -EFAULT;
566            goto out;
567        }
568        rcu_read_lock();
569        new_timer->it_pid = get_pid(good_sigevent(&event));
570        rcu_read_unlock();
571        if (!new_timer->it_pid) {
572            error = -EINVAL;
573            goto out;
574        }
575    } else {
576        event.sigev_notify = SIGEV_SIGNAL;
577        event.sigev_signo = SIGALRM;
578        event.sigev_value.sival_int = new_timer->it_id;
579        new_timer->it_pid = get_pid(task_tgid(current));
580    }
581
582    new_timer->it_sigev_notify = event.sigev_notify;
583    new_timer->sigq->info.si_signo = event.sigev_signo;
584    new_timer->sigq->info.si_value = event.sigev_value;
585    new_timer->sigq->info.si_tid = new_timer->it_id;
586    new_timer->sigq->info.si_code = SI_TIMER;
587
588    if (copy_to_user(created_timer_id,
589             &new_timer_id, sizeof (new_timer_id))) {
590        error = -EFAULT;
591        goto out;
592    }
593
594    error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
595    if (error)
596        goto out;
597
598    spin_lock_irq(&current->sighand->siglock);
599    new_timer->it_signal = current->signal;
600    list_add(&new_timer->list, &current->signal->posix_timers);
601    spin_unlock_irq(&current->sighand->siglock);
602
603    return 0;
604     /*
605     * In the case of the timer belonging to another task, after
606     * the task is unlocked, the timer is owned by the other task
607     * and may cease to exist at any time. Don't use or modify
608     * new_timer after the unlock call.
609     */
610out:
611    release_posix_timer(new_timer, it_id_set);
612    return error;
613}
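
/*
 * Userspace view (illustrative sketch, not part of this file): create a
 * timer on CLOCK_MONOTONIC that delivers SIGALRM to the process, then arm
 * it for 1-second periodic expiry with timer_settime():
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *	};
 *	struct itimerspec its = {
 *		.it_interval = { .tv_sec = 1 },
 *		.it_value    = { .tv_sec = 1 },
 *	};
 *
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == 0)
 *		timer_settime(tid, 0, &its, NULL);
 */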
614
615/*
616 * Locking issues: We need to protect the result of the id lookup until
617 * we get the timer locked down so it is not deleted under us. The
618 * removal is done under the idr spinlock so we use that here to bridge
619 * the find to the timer lock. To avoid a deadlock, the timer id MUST
620 * be released without holding the timer lock.
621 */
622static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
623{
624    struct k_itimer *timr;
625    /*
626     * Watch out here. We do an irqsave on the idr_lock and pass the
627     * flags part over to the timer lock. Must not let interrupts in
628     * while we are moving the lock.
629     */
630    spin_lock_irqsave(&idr_lock, *flags);
631    timr = idr_find(&posix_timers_id, (int)timer_id);
632    if (timr) {
633        spin_lock(&timr->it_lock);
634        if (timr->it_signal == current->signal) {
635            spin_unlock(&idr_lock);
636            return timr;
637        }
638        spin_unlock(&timr->it_lock);
639    }
640    spin_unlock_irqrestore(&idr_lock, *flags);
641
642    return NULL;
643}
644
645/*
646 * Get the time remaining on a POSIX.1b interval timer. This function
647 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
648 * mess with irq.
649 *
650 * We have a couple of messes to clean up here. First there is the case
651 * of a timer that has a requeue pending. These timers should appear to
652 * be in the timer list with an expiry as if we were to requeue them
653 * now.
654 *
655 * The second issue is the SIGEV_NONE timer which may be active but is
656 * not really ever put in the timer list (to save system resources).
657 * This timer may be expired, and if so, we will do it here. Otherwise
658 * it is the same as a requeue pending timer with respect to what we
659 * should report.
660 */
661static void
662common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
663{
664    ktime_t now, remaining, iv;
665    struct hrtimer *timer = &timr->it.real.timer;
666
667    memset(cur_setting, 0, sizeof(struct itimerspec));
668
669    iv = timr->it.real.interval;
670
671    /* interval timer ? */
672    if (iv.tv64)
673        cur_setting->it_interval = ktime_to_timespec(iv);
674    else if (!hrtimer_active(timer) &&
675         (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
676        return;
677
678    now = timer->base->get_time();
679
680    /*
681     * When a requeue is pending or this is a SIGEV_NONE
682     * timer move the expiry time forward by intervals, so
683     * expiry is > now.
684     */
685    if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
686        (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
687        timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
688
689    remaining = ktime_sub(hrtimer_get_expires(timer), now);
690    /* Return 0 only when the timer is expired and not pending */
691    if (remaining.tv64 <= 0) {
692        /*
693         * A single shot SIGEV_NONE timer must return 0, when
694         * it is expired !
695         */
696        if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
697            cur_setting->it_value.tv_nsec = 1;
698    } else
699        cur_setting->it_value = ktime_to_timespec(remaining);
700}
701
702/* Get the time remaining on a POSIX.1b interval timer. */
703SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
704        struct itimerspec __user *, setting)
705{
706    struct k_itimer *timr;
707    struct itimerspec cur_setting;
708    unsigned long flags;
709
710    timr = lock_timer(timer_id, &flags);
711    if (!timr)
712        return -EINVAL;
713
714    CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
715
716    unlock_timer(timr, flags);
717
718    if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
719        return -EFAULT;
720
721    return 0;
722}
723
724/*
725 * Get the number of overruns of a POSIX.1b interval timer. This is
726 * the overrun count of the timer signal last delivered. At the same
727 * time we are accumulating overruns on the next timer. The overrun is
728 * frozen when the signal is delivered, either at the notify time (if
729 * the info block is not queued) or at the actual delivery time (as we
730 * are informed by the callback to do_schedule_next_timer()). So all we
731 * need to do is pick up the frozen overrun.
732 */
733SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
734{
735    struct k_itimer *timr;
736    int overrun;
737    unsigned long flags;
738
739    timr = lock_timer(timer_id, &flags);
740    if (!timr)
741        return -EINVAL;
742
743    overrun = timr->it_overrun_last;
744    unlock_timer(timr, flags);
745
746    return overrun;
747}
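
/*
 * Usage note (illustrative sketch): after the timer signal has been
 * delivered, userspace can pick up the frozen count with
 *
 *	int missed = timer_getoverrun(tid);
 *
 * where tid is the timer_t returned by timer_create(); the value is the
 * number of additional expirations merged into that one delivery.
 */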
748
749/* Set a POSIX.1b interval timer. */
750/* timr->it_lock is taken. */
751static int
752common_timer_set(struct k_itimer *timr, int flags,
753         struct itimerspec *new_setting, struct itimerspec *old_setting)
754{
755    struct hrtimer *timer = &timr->it.real.timer;
756    enum hrtimer_mode mode;
757
758    if (old_setting)
759        common_timer_get(timr, old_setting);
760
761    /* disable the timer */
762    timr->it.real.interval.tv64 = 0;
763    /*
764     * Careful here. On SMP we could be in the "fire" routine which will
765     * be spinning as we hold the lock. But this is ONLY an SMP issue.
766     */
767    if (hrtimer_try_to_cancel(timer) < 0)
768        return TIMER_RETRY;
769
770    timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
771        ~REQUEUE_PENDING;
772    timr->it_overrun_last = 0;
773
774    /* switch off the timer when it_value is zero */
775    if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
776        return 0;
777
778    mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
779    hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
780    timr->it.real.timer.function = posix_timer_fn;
781
782    hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
783
784    /* Convert interval */
785    timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
786
787    /* SIGEV_NONE timers are not queued ! See common_timer_get */
788    if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
789        /* Setup correct expiry time for relative timers */
790        if (mode == HRTIMER_MODE_REL) {
791            hrtimer_add_expires(timer, timer->base->get_time());
792        }
793        return 0;
794    }
795
796    hrtimer_start_expires(timer, mode);
797    return 0;
798}
799
800/* Set a POSIX.1b interval timer */
801SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
802        const struct itimerspec __user *, new_setting,
803        struct itimerspec __user *, old_setting)
804{
805    struct k_itimer *timr;
806    struct itimerspec new_spec, old_spec;
807    int error = 0;
808    unsigned long flag;
809    struct itimerspec *rtn = old_setting ? &old_spec : NULL;
810
811    if (!new_setting)
812        return -EINVAL;
813
814    if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
815        return -EFAULT;
816
817    if (!timespec_valid(&new_spec.it_interval) ||
818        !timespec_valid(&new_spec.it_value))
819        return -EINVAL;
820retry:
821    timr = lock_timer(timer_id, &flag);
822    if (!timr)
823        return -EINVAL;
824
825    error = CLOCK_DISPATCH(timr->it_clock, timer_set,
826                   (timr, flags, &new_spec, rtn));
827
828    unlock_timer(timr, flag);
829    if (error == TIMER_RETRY) {
830        rtn = NULL; /* We already got the old time... */
831        goto retry;
832    }
833
834    if (old_setting && !error &&
835        copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
836        error = -EFAULT;
837
838    return error;
839}
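
/*
 * Usage note (sketch): an all-zero it_value disarms the timer, per the
 * "switch off the timer when it_value is zero" check in common_timer_set()
 * above:
 *
 *	struct itimerspec off = { { 0, 0 }, { 0, 0 } };
 *
 *	timer_settime(tid, 0, &off, NULL);
 */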
840
841static inline int common_timer_del(struct k_itimer *timer)
842{
843    timer->it.real.interval.tv64 = 0;
844
845    if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
846        return TIMER_RETRY;
847    return 0;
848}
849
850static inline int timer_delete_hook(struct k_itimer *timer)
851{
852    return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
853}
854
855/* Delete a POSIX.1b interval timer. */
856SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
857{
858    struct k_itimer *timer;
859    unsigned long flags;
860
861retry_delete:
862    timer = lock_timer(timer_id, &flags);
863    if (!timer)
864        return -EINVAL;
865
866    if (timer_delete_hook(timer) == TIMER_RETRY) {
867        unlock_timer(timer, flags);
868        goto retry_delete;
869    }
870
871    spin_lock(&current->sighand->siglock);
872    list_del(&timer->list);
873    spin_unlock(&current->sighand->siglock);
874    /*
875     * This keeps any tasks waiting on the spin lock from thinking
876     * they got something (see the lock code above).
877     */
878    timer->it_signal = NULL;
879
880    unlock_timer(timer, flags);
881    release_posix_timer(timer, IT_ID_SET);
882    return 0;
883}
884
885/*
886 * Delete a timer owned by the process; used by exit_itimers().
887 */
888static void itimer_delete(struct k_itimer *timer)
889{
890    unsigned long flags;
891
892retry_delete:
893    spin_lock_irqsave(&timer->it_lock, flags);
894
895    if (timer_delete_hook(timer) == TIMER_RETRY) {
896        unlock_timer(timer, flags);
897        goto retry_delete;
898    }
899    list_del(&timer->list);
900    /*
901     * This keeps any tasks waiting on the spin lock from thinking
902     * they got something (see the lock code above).
903     */
904    timer->it_signal = NULL;
905
906    unlock_timer(timer, flags);
907    release_posix_timer(timer, IT_ID_SET);
908}
909
910/*
911 * This is called by do_exit or de_thread, only when there are no more
912 * references to the shared signal_struct.
913 */
914void exit_itimers(struct signal_struct *sig)
915{
916    struct k_itimer *tmr;
917
918    while (!list_empty(&sig->posix_timers)) {
919        tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
920        itimer_delete(tmr);
921    }
922}
923
924/* Not available / possible... functions */
925int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
926{
927    return -EINVAL;
928}
929EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
930
931int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
932                   struct timespec *t, struct timespec __user *r)
933{
934#ifndef ENOTSUP
935    return -EOPNOTSUPP; /* aka ENOTSUP in userland for POSIX */
936#else /* parisc does define it separately. */
937    return -ENOTSUP;
938#endif
939}
940EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
941
942SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
943        const struct timespec __user *, tp)
944{
945    struct timespec new_tp;
946
947    if (invalid_clockid(which_clock))
948        return -EINVAL;
949    if (copy_from_user(&new_tp, tp, sizeof (*tp)))
950        return -EFAULT;
951
952    return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
953}
954
955SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
956        struct timespec __user *,tp)
957{
958    struct timespec kernel_tp;
959    int error;
960
961    if (invalid_clockid(which_clock))
962        return -EINVAL;
963    error = CLOCK_DISPATCH(which_clock, clock_get,
964                   (which_clock, &kernel_tp));
965    if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
966        error = -EFAULT;
967
968    return error;
969
970}
971
972SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
973        struct timespec __user *, tp)
974{
975    struct timespec rtn_tp;
976    int error;
977
978    if (invalid_clockid(which_clock))
979        return -EINVAL;
980
981    error = CLOCK_DISPATCH(which_clock, clock_getres,
982                   (which_clock, &rtn_tp));
983
984    if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
985        error = -EFAULT;
986    }
987
988    return error;
989}
990
991/*
992 * nanosleep for monotonic and realtime clocks
993 */
994static int common_nsleep(const clockid_t which_clock, int flags,
995             struct timespec *tsave, struct timespec __user *rmtp)
996{
997    return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
998                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
999                 which_clock);
1000}
1001
1002SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1003        const struct timespec __user *, rqtp,
1004        struct timespec __user *, rmtp)
1005{
1006    struct timespec t;
1007
1008    if (invalid_clockid(which_clock))
1009        return -EINVAL;
1010
1011    if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
1012        return -EFAULT;
1013
1014    if (!timespec_valid(&t))
1015        return -EINVAL;
1016
1017    return CLOCK_DISPATCH(which_clock, nsleep,
1018                  (which_clock, flags, &t, rmtp));
1019}
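
/*
 * Userspace sketch (not part of this file): an absolute sleep until one
 * second from now on CLOCK_MONOTONIC; with TIMER_ABSTIME the rqtp value is
 * a deadline rather than a duration:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 1;
 *	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
 */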
1020
1021/*
1022 * nanosleep_restart for monotonic and realtime clocks
1023 */
1024static int common_nsleep_restart(struct restart_block *restart_block)
1025{
1026    return hrtimer_nanosleep_restart(restart_block);
1027}
1028
1029/*
1030 * This will restart clock_nanosleep. This is required only by
1031 * compat_clock_nanosleep_restart for now.
1032 */
1033long
1034clock_nanosleep_restart(struct restart_block *restart_block)
1035{
1036    clockid_t which_clock = restart_block->arg0;
1037
1038    return CLOCK_DISPATCH(which_clock, nsleep_restart,
1039                  (restart_block));
1040}
1041
