1 | /* |
2 | * linux/kernel/hrtimer.c |
3 | * |
4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> |
5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar |
6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner |
7 | * |
8 | * High-resolution kernel timers |
9 | * |
10 | * In contrast to the low-resolution timeout API implemented in |
11 | * kernel/timer.c, hrtimers provide finer resolution and accuracy |
12 | * depending on system configuration and capabilities. |
13 | * |
14 | * These timers are currently used for: |
15 | * - itimers |
16 | * - POSIX timers |
17 | * - nanosleep |
18 | * - precise in-kernel timing |
19 | * |
20 | * Started by: Thomas Gleixner and Ingo Molnar |
21 | * |
22 | * Credits: |
23 | * based on kernel/timer.c |
24 | * |
25 | * Help, testing, suggestions, bugfixes, improvements were |
26 | * provided by: |
27 | * |
28 | * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel |
29 | * et al. |
30 | * |
31 | * For licencing details see kernel-base/COPYING |
32 | */ |
33 | |
34 | #include <linux/cpu.h> |
35 | #include <linux/export.h> |
36 | #include <linux/percpu.h> |
37 | #include <linux/hrtimer.h> |
38 | #include <linux/notifier.h> |
39 | #include <linux/syscalls.h> |
40 | #include <linux/kallsyms.h> |
41 | #include <linux/interrupt.h> |
42 | #include <linux/tick.h> |
43 | #include <linux/seq_file.h> |
44 | #include <linux/err.h> |
45 | #include <linux/debugobjects.h> |
46 | #include <linux/sched.h> |
47 | #include <linux/sched/sysctl.h> |
48 | #include <linux/sched/rt.h> |
49 | #include <linux/timer.h> |
50 | |
51 | #include <asm/uaccess.h> |
52 | |
53 | #include <trace/events/timer.h> |
54 | |
55 | /* |
56 | * The timer bases: |
57 | * |
58 | * There are more clockids than hrtimer bases. Thus, we index |
59 | * into the timer bases by the hrtimer_base_type enum. When trying |
60 | * to reach a base using a clockid, hrtimer_clockid_to_base() |
61 | * is used to convert from clockid to the proper hrtimer_base_type. |
62 | */ |
63 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = |
64 | { |
65 | |
66 | .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), |
67 | .clock_base = |
68 | { |
69 | { |
70 | .index = HRTIMER_BASE_MONOTONIC, |
71 | .clockid = CLOCK_MONOTONIC, |
72 | .get_time = &ktime_get, |
73 | .resolution = KTIME_LOW_RES, |
74 | }, |
75 | { |
76 | .index = HRTIMER_BASE_REALTIME, |
77 | .clockid = CLOCK_REALTIME, |
78 | .get_time = &ktime_get_real, |
79 | .resolution = KTIME_LOW_RES, |
80 | }, |
81 | { |
82 | .index = HRTIMER_BASE_BOOTTIME, |
83 | .clockid = CLOCK_BOOTTIME, |
84 | .get_time = &ktime_get_boottime, |
85 | .resolution = KTIME_LOW_RES, |
86 | }, |
87 | } |
88 | }; |
89 | |
90 | static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { |
91 | [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, |
92 | [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, |
93 | [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, |
94 | }; |
95 | |
96 | static inline int hrtimer_clockid_to_base(clockid_t clock_id) |
97 | { |
98 | return hrtimer_clock_to_base_table[clock_id]; |
99 | } |
100 | |
101 | |
102 | /* |
103 | * Get the coarse grained time at the softirq based on xtime and |
104 | * wall_to_monotonic. |
105 | */ |
106 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) |
107 | { |
108 | ktime_t xtim, mono, boot; |
109 | struct timespec xts, tom, slp; |
110 | |
111 | get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp); |
112 | |
113 | xtim = timespec_to_ktime(xts); |
114 | mono = ktime_add(xtim, timespec_to_ktime(tom)); |
115 | boot = ktime_add(mono, timespec_to_ktime(slp)); |
116 | base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; |
117 | base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; |
118 | base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot; |
119 | } |
120 | |
121 | /* |
122 | * Functions and macros which are different for UP/SMP systems are kept in a |
123 | * single place |
124 | */ |
125 | #ifdef CONFIG_SMP |
126 | |
127 | /* |
128 | * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock |
129 | * means that all timers which are tied to this base via timer->base are |
130 | * locked, and the base itself is locked too. |
131 | * |
132 | * So __run_timers/migrate_timers can safely modify all timers which could |
133 | * be found on the lists/queues. |
134 | * |
135 | * When the timer's base is locked, and the timer removed from list, it is |
136 | * possible to set timer->base = NULL and drop the lock: the timer remains |
137 | * locked. |
138 | */ |
139 | static |
140 | struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, |
141 | unsigned long *flags) |
142 | { |
143 | struct hrtimer_clock_base *base; |
144 | |
145 | for (;;) { |
146 | base = timer->base; |
147 | if (likely(base != NULL)) { |
148 | raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); |
149 | if (likely(base == timer->base)) |
150 | return base; |
151 | /* The timer has migrated to another CPU: */ |
152 | raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); |
153 | } |
154 | cpu_relax(); |
155 | } |
156 | } |
157 | |
158 | |
159 | /* |
160 | * Get the preferred target CPU for NOHZ |
161 | */ |
162 | static int hrtimer_get_target(int this_cpu, int pinned) |
163 | { |
164 | #ifdef CONFIG_NO_HZ |
165 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) |
166 | return get_nohz_timer_target(); |
167 | #endif |
168 | return this_cpu; |
169 | } |
170 | |
171 | /* |
172 | * With HIGHRES=y we do not migrate the timer when it is expiring |
173 | * before the next event on the target cpu because we cannot reprogram |
174 | * the target cpu hardware and we would cause it to fire late. |
175 | * |
176 | * Called with cpu_base->lock of target cpu held. |
177 | */ |
178 | static int |
179 | hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) |
180 | { |
181 | #ifdef CONFIG_HIGH_RES_TIMERS |
182 | ktime_t expires; |
183 | |
184 | if (!new_base->cpu_base->hres_active) |
185 | return 0; |
186 | |
187 | expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); |
188 | return expires.tv64 <= new_base->cpu_base->expires_next.tv64; |
189 | #else |
190 | return 0; |
191 | #endif |
192 | } |
193 | |
194 | /* |
195 | * Switch the timer base to the current CPU when possible. |
196 | */ |
197 | static inline struct hrtimer_clock_base * |
198 | switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, |
199 | int pinned) |
200 | { |
201 | struct hrtimer_clock_base *new_base; |
202 | struct hrtimer_cpu_base *new_cpu_base; |
203 | int this_cpu = smp_processor_id(); |
204 | int cpu = hrtimer_get_target(this_cpu, pinned); |
205 | int basenum = base->index; |
206 | |
207 | again: |
208 | new_cpu_base = &per_cpu(hrtimer_bases, cpu); |
209 | new_base = &new_cpu_base->clock_base[basenum]; |
210 | |
211 | if (base != new_base) { |
212 | /* |
213 | * We are trying to move timer to new_base. |
214 | * However we can't change timer's base while it is running, |
215 | * so we keep it on the same CPU. No hassle vs. reprogramming |
216 | * the event source in the high resolution case. The softirq |
217 | * code will take care of this when the timer function has |
218 | * completed. There is no conflict as we hold the lock until |
219 | * the timer is enqueued. |
220 | */ |
221 | if (unlikely(hrtimer_callback_running(timer))) |
222 | return base; |
223 | |
224 | /* See the comment in lock_timer_base() */ |
225 | timer->base = NULL; |
226 | raw_spin_unlock(&base->cpu_base->lock); |
227 | raw_spin_lock(&new_base->cpu_base->lock); |
228 | |
229 | if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { |
230 | cpu = this_cpu; |
231 | raw_spin_unlock(&new_base->cpu_base->lock); |
232 | raw_spin_lock(&base->cpu_base->lock); |
233 | timer->base = base; |
234 | goto again; |
235 | } |
236 | timer->base = new_base; |
237 | } |
238 | return new_base; |
239 | } |
240 | |
241 | #else /* CONFIG_SMP */ |
242 | |
243 | static inline struct hrtimer_clock_base * |
244 | lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) |
245 | { |
246 | struct hrtimer_clock_base *base = timer->base; |
247 | |
248 | raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); |
249 | |
250 | return base; |
251 | } |
252 | |
253 | # define switch_hrtimer_base(t, b, p) (b) |
254 | |
255 | #endif /* !CONFIG_SMP */ |
256 | |
257 | /* |
258 | * Functions for the union type storage format of ktime_t which are |
259 | * too large for inlining: |
260 | */ |
261 | #if BITS_PER_LONG < 64 |
262 | # ifndef CONFIG_KTIME_SCALAR |
263 | /** |
264 | * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable |
265 | * @kt: addend |
266 | * @nsec: the scalar nsec value to add |
267 | * |
268 | * Returns the sum of kt and nsec in ktime_t format |
269 | */ |
270 | ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) |
271 | { |
272 | ktime_t tmp; |
273 | |
274 | if (likely(nsec < NSEC_PER_SEC)) { |
275 | tmp.tv64 = nsec; |
276 | } else { |
277 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); |
278 | |
279 | tmp = ktime_set((long)nsec, rem); |
280 | } |
281 | |
282 | return ktime_add(kt, tmp); |
283 | } |
284 | |
285 | EXPORT_SYMBOL_GPL(ktime_add_ns); |
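/*
 * Worked example for the split above (hypothetical values): for
 * nsec = 3500000000, do_div() leaves nsec = 3 and rem = 500000000,
 * so tmp = ktime_set(3, 500000000), i.e. 3.5 seconds, which is then
 * added to kt with the 32-bit-safe ktime_add().
 */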
286 | |
287 | /** |
288 | * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable |
289 | * @kt: minuend |
290 | * @nsec: the scalar nsec value to subtract |
291 | * |
292 | * Returns the subtraction of @nsec from @kt in ktime_t format |
293 | */ |
294 | ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) |
295 | { |
296 | ktime_t tmp; |
297 | |
298 | if (likely(nsec < NSEC_PER_SEC)) { |
299 | tmp.tv64 = nsec; |
300 | } else { |
301 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); |
302 | |
303 | tmp = ktime_set((long)nsec, rem); |
304 | } |
305 | |
306 | return ktime_sub(kt, tmp); |
307 | } |
308 | |
309 | EXPORT_SYMBOL_GPL(ktime_sub_ns); |
310 | # endif /* !CONFIG_KTIME_SCALAR */ |
311 | |
312 | /* |
313 | * Divide a ktime value by a nanosecond value |
314 | */ |
315 | u64 ktime_divns(const ktime_t kt, s64 div) |
316 | { |
317 | u64 dclc; |
318 | int sft = 0; |
319 | |
320 | dclc = ktime_to_ns(kt); |
321 | /* Make sure the divisor is less than 2^32: */ |
322 | while (div >> 32) { |
323 | sft++; |
324 | div >>= 1; |
325 | } |
326 | dclc >>= sft; |
327 | do_div(dclc, (unsigned long) div); |
328 | |
329 | return dclc; |
330 | } |
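/*
 * Worked example for the shift trick above (hypothetical values):
 * dividing kt = 10 s (10000000000 ns) by div = 5 s (5000000000 ns).
 * 5000000000 does not fit in 32 bits, so both values are shifted right
 * once: dclc = 5000000000, div = 2500000000. do_div() then yields 2,
 * the expected quotient, at the cost of one bit of precision per shift.
 */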
331 | #endif /* BITS_PER_LONG < 64 */ |
332 | |
333 | /* |
334 | * Add two ktime values and do a safety check for overflow: |
335 | */ |
336 | ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) |
337 | { |
338 | ktime_t res = ktime_add(lhs, rhs); |
339 | |
340 | /* |
341 | * We use KTIME_SEC_MAX here, the maximum timeout which we can |
342 | * return to user space in a timespec: |
343 | */ |
344 | if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) |
345 | res = ktime_set(KTIME_SEC_MAX, 0); |
346 | |
347 | return res; |
348 | } |
349 | |
350 | EXPORT_SYMBOL_GPL(ktime_add_safe); |
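/*
 * Example of the clamp above: if lhs and rhs are both close to KTIME_MAX,
 * the signed 64-bit addition wraps negative; the check catches that and
 * returns ktime_set(KTIME_SEC_MAX, 0), the largest timeout which can
 * still be converted to a valid timespec for user space.
 */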
351 | |
352 | #ifdef CONFIG_DEBUG_OBJECTS_TIMERS |
353 | |
354 | static struct debug_obj_descr hrtimer_debug_descr; |
355 | |
356 | static void *hrtimer_debug_hint(void *addr) |
357 | { |
358 | return ((struct hrtimer *) addr)->function; |
359 | } |
360 | |
361 | /* |
362 | * fixup_init is called when: |
363 | * - an active object is initialized |
364 | */ |
365 | static int hrtimer_fixup_init(void *addr, enum debug_obj_state state) |
366 | { |
367 | struct hrtimer *timer = addr; |
368 | |
369 | switch (state) { |
370 | case ODEBUG_STATE_ACTIVE: |
371 | hrtimer_cancel(timer); |
372 | debug_object_init(timer, &hrtimer_debug_descr); |
373 | return 1; |
374 | default: |
375 | return 0; |
376 | } |
377 | } |
378 | |
379 | /* |
380 | * fixup_activate is called when: |
381 | * - an active object is activated |
382 | * - an unknown object is activated (might be a statically initialized object) |
383 | */ |
384 | static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state) |
385 | { |
386 | switch (state) { |
387 | |
388 | case ODEBUG_STATE_NOTAVAILABLE: |
389 | WARN_ON_ONCE(1); |
390 | return 0; |
391 | |
392 | case ODEBUG_STATE_ACTIVE: |
393 | WARN_ON(1); |
394 | |
395 | default: |
396 | return 0; |
397 | } |
398 | } |
399 | |
400 | /* |
401 | * fixup_free is called when: |
402 | * - an active object is freed |
403 | */ |
404 | static int hrtimer_fixup_free(void *addr, enum debug_obj_state state) |
405 | { |
406 | struct hrtimer *timer = addr; |
407 | |
408 | switch (state) { |
409 | case ODEBUG_STATE_ACTIVE: |
410 | hrtimer_cancel(timer); |
411 | debug_object_free(timer, &hrtimer_debug_descr); |
412 | return 1; |
413 | default: |
414 | return 0; |
415 | } |
416 | } |
417 | |
418 | static struct debug_obj_descr hrtimer_debug_descr = { |
419 | .name = "hrtimer", |
420 | .debug_hint = hrtimer_debug_hint, |
421 | .fixup_init = hrtimer_fixup_init, |
422 | .fixup_activate = hrtimer_fixup_activate, |
423 | .fixup_free = hrtimer_fixup_free, |
424 | }; |
425 | |
426 | static inline void debug_hrtimer_init(struct hrtimer *timer) |
427 | { |
428 | debug_object_init(timer, &hrtimer_debug_descr); |
429 | } |
430 | |
431 | static inline void debug_hrtimer_activate(struct hrtimer *timer) |
432 | { |
433 | debug_object_activate(timer, &hrtimer_debug_descr); |
434 | } |
435 | |
436 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) |
437 | { |
438 | debug_object_deactivate(timer, &hrtimer_debug_descr); |
439 | } |
440 | |
441 | static inline void debug_hrtimer_free(struct hrtimer *timer) |
442 | { |
443 | debug_object_free(timer, &hrtimer_debug_descr); |
444 | } |
445 | |
446 | static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
447 | enum hrtimer_mode mode); |
448 | |
449 | void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id, |
450 | enum hrtimer_mode mode) |
451 | { |
452 | debug_object_init_on_stack(timer, &hrtimer_debug_descr); |
453 | __hrtimer_init(timer, clock_id, mode); |
454 | } |
455 | EXPORT_SYMBOL_GPL(hrtimer_init_on_stack); |
456 | |
457 | void destroy_hrtimer_on_stack(struct hrtimer *timer) |
458 | { |
459 | debug_object_free(timer, &hrtimer_debug_descr); |
460 | } |
461 | |
462 | #else |
463 | static inline void debug_hrtimer_init(struct hrtimer *timer) { } |
464 | static inline void debug_hrtimer_activate(struct hrtimer *timer) { } |
465 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } |
466 | #endif |
467 | |
468 | static inline void |
469 | debug_init(struct hrtimer *timer, clockid_t clockid, |
470 | enum hrtimer_mode mode) |
471 | { |
472 | debug_hrtimer_init(timer); |
473 | trace_hrtimer_init(timer, clockid, mode); |
474 | } |
475 | |
476 | static inline void debug_activate(struct hrtimer *timer) |
477 | { |
478 | debug_hrtimer_activate(timer); |
479 | trace_hrtimer_start(timer); |
480 | } |
481 | |
482 | static inline void debug_deactivate(struct hrtimer *timer) |
483 | { |
484 | debug_hrtimer_deactivate(timer); |
485 | trace_hrtimer_cancel(timer); |
486 | } |
487 | |
488 | /* High resolution timer related functions */ |
489 | #ifdef CONFIG_HIGH_RES_TIMERS |
490 | |
491 | /* |
492 | * High resolution timer enabled ? |
493 | */ |
494 | static int hrtimer_hres_enabled __read_mostly = 1; |
495 | |
496 | /* |
497 | * Enable / Disable high resolution mode |
498 | */ |
499 | static int __init setup_hrtimer_hres(char *str) |
500 | { |
501 | if (!strcmp(str, "off")) |
502 | hrtimer_hres_enabled = 0; |
503 | else if (!strcmp(str, "on")) |
504 | hrtimer_hres_enabled = 1; |
505 | else |
506 | return 0; |
507 | return 1; |
508 | } |
509 | |
510 | __setup("highres=", setup_hrtimer_hres); |
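/*
 * Example: booting with "highres=off" on the kernel command line sets
 * hrtimer_hres_enabled to 0, so the switch to high resolution mode via
 * hrtimer_switch_to_hres() is never attempted; the system may still run
 * in low resolution NOHZ mode.
 */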
511 | |
512 | /* |
513 | * hrtimer_is_hres_enabled - query whether high resolution mode is enabled |
514 | */ |
515 | static inline int hrtimer_is_hres_enabled(void) |
516 | { |
517 | return hrtimer_hres_enabled; |
518 | } |
519 | |
520 | /* |
521 | * Is the high resolution mode active ? |
522 | */ |
523 | static inline int hrtimer_hres_active(void) |
524 | { |
525 | return __this_cpu_read(hrtimer_bases.hres_active); |
526 | } |
527 | |
528 | /* |
529 | * Reprogram the event source with checking both queues for the |
530 | * next event |
531 | * Called with interrupts disabled and base->lock held |
532 | */ |
533 | static void |
534 | hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) |
535 | { |
536 | int i; |
537 | struct hrtimer_clock_base *base = cpu_base->clock_base; |
538 | ktime_t expires, expires_next; |
539 | |
540 | expires_next.tv64 = KTIME_MAX; |
541 | |
542 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { |
543 | struct hrtimer *timer; |
544 | struct timerqueue_node *next; |
545 | |
546 | next = timerqueue_getnext(&base->active); |
547 | if (!next) |
548 | continue; |
549 | timer = container_of(next, struct hrtimer, node); |
550 | |
551 | expires = ktime_sub(hrtimer_get_expires(timer), base->offset); |
552 | /* |
553 | * clock_was_set() has changed base->offset so the |
554 | * result might be negative. Fix it up to prevent a |
555 | * false positive in clockevents_program_event() |
556 | */ |
557 | if (expires.tv64 < 0) |
558 | expires.tv64 = 0; |
559 | if (expires.tv64 < expires_next.tv64) |
560 | expires_next = expires; |
561 | } |
562 | |
563 | if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) |
564 | return; |
565 | |
566 | cpu_base->expires_next.tv64 = expires_next.tv64; |
567 | |
568 | if (cpu_base->expires_next.tv64 != KTIME_MAX) |
569 | tick_program_event(cpu_base->expires_next, 1); |
570 | } |
571 | |
572 | /* |
573 | * Shared reprogramming for clock_realtime and clock_monotonic |
574 | * |
575 | * When a timer is enqueued and expires earlier than the already enqueued |
576 | * timers, we have to check, whether it expires earlier than the timer for |
577 | * which the clock event device was armed. |
578 | * |
579 | * Called with interrupts disabled and base->cpu_base.lock held |
580 | */ |
581 | static int hrtimer_reprogram(struct hrtimer *timer, |
582 | struct hrtimer_clock_base *base) |
583 | { |
584 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
585 | ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); |
586 | int res; |
587 | |
588 | WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); |
589 | |
590 | /* |
591 | * When the callback is running, we do not reprogram the clock event |
592 | * device. The timer callback is either running on a different CPU or |
593 | * the callback is executed in the hrtimer_interrupt context. The |
594 | * reprogramming is handled either by the softirq, which called the |
595 | * callback or at the end of the hrtimer_interrupt. |
596 | */ |
597 | if (hrtimer_callback_running(timer)) |
598 | return 0; |
599 | |
600 | /* |
601 | * CLOCK_REALTIME timer might be requested with an absolute |
602 | * expiry time which is less than base->offset. Nothing wrong |
603 | * about that, just avoid calling into the tick code, which |
604 | * now objects to negative expiry values. |
605 | */ |
606 | if (expires.tv64 < 0) |
607 | return -ETIME; |
608 | |
609 | if (expires.tv64 >= cpu_base->expires_next.tv64) |
610 | return 0; |
611 | |
612 | /* |
613 | * If a hang was detected in the last timer interrupt then we |
614 | * do not schedule a timer which is earlier than the expiry |
615 | * which we enforced in the hang detection. We want the system |
616 | * to make progress. |
617 | */ |
618 | if (cpu_base->hang_detected) |
619 | return 0; |
620 | |
621 | /* |
622 | * Clockevents returns -ETIME, when the event was in the past. |
623 | */ |
624 | res = tick_program_event(expires, 0); |
625 | if (!IS_ERR_VALUE(res)) |
626 | cpu_base->expires_next = expires; |
627 | return res; |
628 | } |
629 | |
630 | /* |
631 | * Initialize the high resolution related parts of cpu_base |
632 | */ |
633 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) |
634 | { |
635 | base->expires_next.tv64 = KTIME_MAX; |
636 | base->hres_active = 0; |
637 | } |
638 | |
639 | /* |
640 | * When High resolution timers are active, try to reprogram. Note, that in case |
641 | * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry |
642 | * check happens. The timer gets enqueued into the rbtree. The reprogramming |
643 | * and expiry check is done in the hrtimer_interrupt or in the softirq. |
644 | */ |
645 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
646 | struct hrtimer_clock_base *base) |
647 | { |
648 | return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); |
649 | } |
650 | |
651 | static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) |
652 | { |
653 | ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; |
654 | ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; |
655 | |
656 | return ktime_get_update_offsets(offs_real, offs_boot); |
657 | } |
658 | |
659 | /* |
660 | * Retrigger next event is called after clock was set |
661 | * |
662 | * Called with interrupts disabled via on_each_cpu() |
663 | */ |
664 | static void retrigger_next_event(void *arg) |
665 | { |
666 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); |
667 | |
668 | if (!hrtimer_hres_active()) |
669 | return; |
670 | |
671 | raw_spin_lock(&base->lock); |
672 | hrtimer_update_base(base); |
673 | hrtimer_force_reprogram(base, 0); |
674 | raw_spin_unlock(&base->lock); |
675 | } |
676 | |
677 | /* |
678 | * Switch to high resolution mode |
679 | */ |
680 | static int hrtimer_switch_to_hres(void) |
681 | { |
682 | int i, cpu = smp_processor_id(); |
683 | struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); |
684 | unsigned long flags; |
685 | |
686 | if (base->hres_active) |
687 | return 1; |
688 | |
689 | local_irq_save(flags); |
690 | |
691 | if (tick_init_highres()) { |
692 | local_irq_restore(flags); |
693 | printk(KERN_WARNING "Could not switch to high resolution " |
694 | "mode on CPU %d\n", cpu); |
695 | return 0; |
696 | } |
697 | base->hres_active = 1; |
698 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
699 | base->clock_base[i].resolution = KTIME_HIGH_RES; |
700 | |
701 | tick_setup_sched_timer(); |
702 | /* "Retrigger" the interrupt to get things going */ |
703 | retrigger_next_event(NULL); |
704 | local_irq_restore(flags); |
705 | return 1; |
706 | } |
707 | |
708 | /* |
709 | * Called from timekeeping code to reprogram the hrtimer interrupt |
710 | * device. If called from the timer interrupt context we defer it to |
711 | * softirq context. |
712 | */ |
713 | void clock_was_set_delayed(void) |
714 | { |
715 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
716 | |
717 | cpu_base->clock_was_set = 1; |
718 | __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
719 | } |
720 | |
721 | #else |
722 | |
723 | static inline int hrtimer_hres_active(void) { return 0; } |
724 | static inline int hrtimer_is_hres_enabled(void) { return 0; } |
725 | static inline int hrtimer_switch_to_hres(void) { return 0; } |
726 | static inline void |
727 | hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } |
728 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
729 | struct hrtimer_clock_base *base) |
730 | { |
731 | return 0; |
732 | } |
733 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } |
734 | static inline void retrigger_next_event(void *arg) { } |
735 | |
736 | #endif /* CONFIG_HIGH_RES_TIMERS */ |
737 | |
738 | /* |
739 | * Clock realtime was set |
740 | * |
741 | * Change the offset of the realtime clock vs. the monotonic |
742 | * clock. |
743 | * |
744 | * We might have to reprogram the high resolution timer interrupt. On |
745 | * SMP we call the architecture specific code to retrigger _all_ high |
746 | * resolution timer interrupts. On UP we just disable interrupts and |
747 | * call the high resolution interrupt code. |
748 | */ |
749 | void clock_was_set(void) |
750 | { |
751 | #ifdef CONFIG_HIGH_RES_TIMERS |
752 | /* Retrigger the CPU local events everywhere */ |
753 | on_each_cpu(retrigger_next_event, NULL, 1); |
754 | #endif |
755 | timerfd_clock_was_set(); |
756 | } |
757 | |
758 | /* |
759 | * During resume we might have to reprogram the high resolution timer |
760 | * interrupt (on the local CPU): |
761 | */ |
762 | void hrtimers_resume(void) |
763 | { |
764 | WARN_ONCE(!irqs_disabled(), |
765 | KERN_INFO "hrtimers_resume() called with IRQs enabled!"); |
766 | |
767 | retrigger_next_event(NULL); |
768 | timerfd_clock_was_set(); |
769 | } |
770 | |
771 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) |
772 | { |
773 | #ifdef CONFIG_TIMER_STATS |
774 | if (timer->start_site) |
775 | return; |
776 | timer->start_site = __builtin_return_address(0); |
777 | memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); |
778 | timer->start_pid = current->pid; |
779 | #endif |
780 | } |
781 | |
782 | static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer) |
783 | { |
784 | #ifdef CONFIG_TIMER_STATS |
785 | timer->start_site = NULL; |
786 | #endif |
787 | } |
788 | |
789 | static inline void timer_stats_account_hrtimer(struct hrtimer *timer) |
790 | { |
791 | #ifdef CONFIG_TIMER_STATS |
792 | if (likely(!timer_stats_active)) |
793 | return; |
794 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, |
795 | timer->function, timer->start_comm, 0); |
796 | #endif |
797 | } |
798 | |
799 | /* |
800 | * Counterpart to lock_hrtimer_base above: |
801 | */ |
802 | static inline |
803 | void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) |
804 | { |
805 | raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); |
806 | } |
807 | |
808 | /** |
809 | * hrtimer_forward - forward the timer expiry |
810 | * @timer: hrtimer to forward |
811 | * @now: forward past this time |
812 | * @interval: the interval to forward |
813 | * |
814 | * Forward the timer expiry so it will expire in the future. |
815 | * Returns the number of overruns. |
816 | */ |
817 | u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) |
818 | { |
819 | u64 orun = 1; |
820 | ktime_t delta; |
821 | |
822 | delta = ktime_sub(now, hrtimer_get_expires(timer)); |
823 | |
824 | if (delta.tv64 < 0) |
825 | return 0; |
826 | |
827 | if (interval.tv64 < timer->base->resolution.tv64) |
828 | interval.tv64 = timer->base->resolution.tv64; |
829 | |
830 | if (unlikely(delta.tv64 >= interval.tv64)) { |
831 | s64 incr = ktime_to_ns(interval); |
832 | |
833 | orun = ktime_divns(delta, incr); |
834 | hrtimer_add_expires_ns(timer, incr * orun); |
835 | if (hrtimer_get_expires_tv64(timer) > now.tv64) |
836 | return orun; |
837 | /* |
838 | * This (and the ktime_add() below) is the |
839 | * correction for exact: |
840 | */ |
841 | orun++; |
842 | } |
843 | hrtimer_add_expires(timer, interval); |
844 | |
845 | return orun; |
846 | } |
847 | EXPORT_SYMBOL_GPL(hrtimer_forward); |
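/*
 * Illustrative sketch of the common periodic-callback idiom built on
 * hrtimer_forward(): push the expiry forward by a fixed period and ask
 * for a restart. The names my_period_ns and my_periodic_cb are
 * hypothetical and the period is arbitrary.
 */
static const u64 my_period_ns = NSEC_PER_MSEC;	/* 1 ms, arbitrary */

static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
{
	/* Step the expiry past "now"; overruns, if any, are skipped. */
	hrtimer_forward(timer, hrtimer_cb_get_time(timer),
			ns_to_ktime(my_period_ns));
	return HRTIMER_RESTART;
}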
848 | |
849 | /* |
850 | * enqueue_hrtimer - internal function to (re)start a timer |
851 | * |
852 | * The timer is inserted in expiry order. Insertion into the |
853 | * red black tree is O(log(n)). Must hold the base lock. |
854 | * |
855 | * Returns 1 when the new timer is the leftmost timer in the tree. |
856 | */ |
857 | static int enqueue_hrtimer(struct hrtimer *timer, |
858 | struct hrtimer_clock_base *base) |
859 | { |
860 | debug_activate(timer); |
861 | |
862 | timerqueue_add(&base->active, &timer->node); |
863 | base->cpu_base->active_bases |= 1 << base->index; |
864 | |
865 | /* |
866 | * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the |
867 | * state of a possibly running callback. |
868 | */ |
869 | timer->state |= HRTIMER_STATE_ENQUEUED; |
870 | |
871 | return (&timer->node == base->active.next); |
872 | } |
873 | |
874 | /* |
875 | * __remove_hrtimer - internal function to remove a timer |
876 | * |
877 | * Caller must hold the base lock. |
878 | * |
879 | * High resolution timer mode reprograms the clock event device when the |
880 | * timer is the one which expires next. The caller can disable this by setting |
881 | * reprogram to zero. This is useful, when the context does a reprogramming |
882 | * anyway (e.g. timer interrupt) |
883 | */ |
884 | static void __remove_hrtimer(struct hrtimer *timer, |
885 | struct hrtimer_clock_base *base, |
886 | unsigned long newstate, int reprogram) |
887 | { |
888 | struct timerqueue_node *next_timer; |
889 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) |
890 | goto out; |
891 | |
892 | next_timer = timerqueue_getnext(&base->active); |
893 | timerqueue_del(&base->active, &timer->node); |
894 | if (&timer->node == next_timer) { |
895 | #ifdef CONFIG_HIGH_RES_TIMERS |
896 | /* Reprogram the clock event device, if enabled */ |
897 | if (reprogram && hrtimer_hres_active()) { |
898 | ktime_t expires; |
899 | |
900 | expires = ktime_sub(hrtimer_get_expires(timer), |
901 | base->offset); |
902 | if (base->cpu_base->expires_next.tv64 == expires.tv64) |
903 | hrtimer_force_reprogram(base->cpu_base, 1); |
904 | } |
905 | #endif |
906 | } |
907 | if (!timerqueue_getnext(&base->active)) |
908 | base->cpu_base->active_bases &= ~(1 << base->index); |
909 | out: |
910 | timer->state = newstate; |
911 | } |
912 | |
913 | /* |
914 | * remove hrtimer, called with base lock held |
915 | */ |
916 | static inline int |
917 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) |
918 | { |
919 | if (hrtimer_is_queued(timer)) { |
920 | unsigned long state; |
921 | int reprogram; |
922 | |
923 | /* |
924 | * Remove the timer and force reprogramming when high |
925 | * resolution mode is active and the timer is on the current |
926 | * CPU. If we remove a timer on another CPU, reprogramming is |
927 | * skipped. The interrupt event on this CPU is fired and |
928 | * reprogramming happens in the interrupt handler. This is a |
929 | * rare case and less expensive than a smp call. |
930 | */ |
931 | debug_deactivate(timer); |
932 | timer_stats_hrtimer_clear_start_info(timer); |
933 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); |
934 | /* |
935 | * We must preserve the CALLBACK state flag here, |
936 | * otherwise we could move the timer base in |
937 | * switch_hrtimer_base. |
938 | */ |
939 | state = timer->state & HRTIMER_STATE_CALLBACK; |
940 | __remove_hrtimer(timer, base, state, reprogram); |
941 | return 1; |
942 | } |
943 | return 0; |
944 | } |
945 | |
946 | int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
947 | unsigned long delta_ns, const enum hrtimer_mode mode, |
948 | int wakeup) |
949 | { |
950 | struct hrtimer_clock_base *base, *new_base; |
951 | unsigned long flags; |
952 | int ret, leftmost; |
953 | |
954 | base = lock_hrtimer_base(timer, &flags); |
955 | |
956 | /* Remove an active timer from the queue: */ |
957 | ret = remove_hrtimer(timer, base); |
958 | |
959 | /* Switch the timer base, if necessary: */ |
960 | new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); |
961 | |
962 | if (mode & HRTIMER_MODE_REL) { |
963 | tim = ktime_add_safe(tim, new_base->get_time()); |
964 | /* |
965 | * CONFIG_TIME_LOW_RES is a temporary way for architectures |
966 | * to signal that they simply return xtime in |
967 | * do_gettimeoffset(). In this case we want to round up by |
968 | * resolution when starting a relative timer, to avoid short |
969 | * timeouts. This will go away with the GTOD framework. |
970 | */ |
971 | #ifdef CONFIG_TIME_LOW_RES |
972 | tim = ktime_add_safe(tim, base->resolution); |
973 | #endif |
974 | } |
975 | |
976 | hrtimer_set_expires_range_ns(timer, tim, delta_ns); |
977 | |
978 | timer_stats_hrtimer_set_start_info(timer); |
979 | |
980 | leftmost = enqueue_hrtimer(timer, new_base); |
981 | |
982 | /* |
983 | * Only allow reprogramming if the new base is on this CPU. |
984 | * (it might still be on another CPU if the timer was pending) |
985 | * |
986 | * XXX send_remote_softirq() ? |
987 | */ |
988 | if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) |
989 | && hrtimer_enqueue_reprogram(timer, new_base)) { |
990 | if (wakeup) { |
991 | /* |
992 | * We need to drop cpu_base->lock to avoid a |
993 | * lock ordering issue vs. rq->lock. |
994 | */ |
995 | raw_spin_unlock(&new_base->cpu_base->lock); |
996 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
997 | local_irq_restore(flags); |
998 | return ret; |
999 | } else { |
1000 | __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1001 | } |
1002 | } |
1003 | |
1004 | unlock_hrtimer_base(timer, &flags); |
1005 | |
1006 | return ret; |
1007 | } |
1008 | |
1009 | /** |
1010 | * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU |
1011 | * @timer: the timer to be added |
1012 | * @tim: expiry time |
1013 | * @delta_ns: "slack" range for the timer |
1014 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL) |
1015 | * |
1016 | * Returns: |
1017 | * 0 on success |
1018 | * 1 when the timer was active |
1019 | */ |
1020 | int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
1021 | unsigned long delta_ns, const enum hrtimer_mode mode) |
1022 | { |
1023 | return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1); |
1024 | } |
1025 | EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); |
1026 | |
1027 | /** |
1028 | * hrtimer_start - (re)start an hrtimer on the current CPU |
1029 | * @timer: the timer to be added |
1030 | * @tim: expiry time |
1031 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL) |
1032 | * |
1033 | * Returns: |
1034 | * 0 on success |
1035 | * 1 when the timer was active |
1036 | */ |
1037 | int |
1038 | hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) |
1039 | { |
1040 | return __hrtimer_start_range_ns(timer, tim, 0, mode, 1); |
1041 | } |
1042 | EXPORT_SYMBOL_GPL(hrtimer_start); |
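/*
 * Illustrative sketch of the usual init/start pairing (hypothetical
 * my_timer, using the my_periodic_cb callback sketched near
 * hrtimer_forward() above): a relative, monotonic timer armed 1 ms out.
 */
static struct hrtimer my_timer;

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_periodic_cb;
	hrtimer_start(&my_timer, ns_to_ktime(NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}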
1043 | |
1044 | |
1045 | /** |
1046 | * hrtimer_try_to_cancel - try to deactivate a timer |
1047 | * @timer: hrtimer to stop |
1048 | * |
1049 | * Returns: |
1050 | * 0 when the timer was not active |
1051 | * 1 when the timer was active |
1052 | * -1 when the timer is currently executing the callback function and |
1053 | * cannot be stopped |
1054 | */ |
1055 | int hrtimer_try_to_cancel(struct hrtimer *timer) |
1056 | { |
1057 | struct hrtimer_clock_base *base; |
1058 | unsigned long flags; |
1059 | int ret = -1; |
1060 | |
1061 | base = lock_hrtimer_base(timer, &flags); |
1062 | |
1063 | if (!hrtimer_callback_running(timer)) |
1064 | ret = remove_hrtimer(timer, base); |
1065 | |
1066 | unlock_hrtimer_base(timer, &flags); |
1067 | |
1068 | return ret; |
1069 | |
1070 | } |
1071 | EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); |
1072 | |
1073 | /** |
1074 | * hrtimer_cancel - cancel a timer and wait for the handler to finish. |
1075 | * @timer: the timer to be cancelled |
1076 | * |
1077 | * Returns: |
1078 | * 0 when the timer was not active |
1079 | * 1 when the timer was active |
1080 | */ |
1081 | int hrtimer_cancel(struct hrtimer *timer) |
1082 | { |
1083 | for (;;) { |
1084 | int ret = hrtimer_try_to_cancel(timer); |
1085 | |
1086 | if (ret >= 0) |
1087 | return ret; |
1088 | cpu_relax(); |
1089 | } |
1090 | } |
1091 | EXPORT_SYMBOL_GPL(hrtimer_cancel); |
1092 | |
1093 | /** |
1094 | * hrtimer_get_remaining - get remaining time for the timer |
1095 | * @timer: the timer to read |
1096 | */ |
1097 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
1098 | { |
1099 | unsigned long flags; |
1100 | ktime_t rem; |
1101 | |
1102 | lock_hrtimer_base(timer, &flags); |
1103 | rem = hrtimer_expires_remaining(timer); |
1104 | unlock_hrtimer_base(timer, &flags); |
1105 | |
1106 | return rem; |
1107 | } |
1108 | EXPORT_SYMBOL_GPL(hrtimer_get_remaining); |
1109 | |
1110 | #ifdef CONFIG_NO_HZ |
1111 | /** |
1112 | * hrtimer_get_next_event - get the time until next expiry event |
1113 | * |
1114 | * Returns the delta to the next expiry event or KTIME_MAX if no timer |
1115 | * is pending. |
1116 | */ |
1117 | ktime_t hrtimer_get_next_event(void) |
1118 | { |
1119 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1120 | struct hrtimer_clock_base *base = cpu_base->clock_base; |
1121 | ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; |
1122 | unsigned long flags; |
1123 | int i; |
1124 | |
1125 | raw_spin_lock_irqsave(&cpu_base->lock, flags); |
1126 | |
1127 | if (!hrtimer_hres_active()) { |
1128 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { |
1129 | struct hrtimer *timer; |
1130 | struct timerqueue_node *next; |
1131 | |
1132 | next = timerqueue_getnext(&base->active); |
1133 | if (!next) |
1134 | continue; |
1135 | |
1136 | timer = container_of(next, struct hrtimer, node); |
1137 | delta.tv64 = hrtimer_get_expires_tv64(timer); |
1138 | delta = ktime_sub(delta, base->get_time()); |
1139 | if (delta.tv64 < mindelta.tv64) |
1140 | mindelta.tv64 = delta.tv64; |
1141 | } |
1142 | } |
1143 | |
1144 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
1145 | |
1146 | if (mindelta.tv64 < 0) |
1147 | mindelta.tv64 = 0; |
1148 | return mindelta; |
1149 | } |
1150 | #endif |
1151 | |
1152 | static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
1153 | enum hrtimer_mode mode) |
1154 | { |
1155 | struct hrtimer_cpu_base *cpu_base; |
1156 | int base; |
1157 | |
1158 | memset(timer, 0, sizeof(struct hrtimer)); |
1159 | |
1160 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); |
1161 | |
1162 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) |
1163 | clock_id = CLOCK_MONOTONIC; |
1164 | |
1165 | base = hrtimer_clockid_to_base(clock_id); |
1166 | timer->base = &cpu_base->clock_base[base]; |
1167 | timerqueue_init(&timer->node); |
1168 | |
1169 | #ifdef CONFIG_TIMER_STATS |
1170 | timer->start_site = NULL; |
1171 | timer->start_pid = -1; |
1172 | memset(timer->start_comm, 0, TASK_COMM_LEN); |
1173 | #endif |
1174 | } |
1175 | |
1176 | /** |
1177 | * hrtimer_init - initialize a timer to the given clock |
1178 | * @timer: the timer to be initialized |
1179 | * @clock_id: the clock to be used |
1180 | * @mode: timer mode abs/rel |
1181 | */ |
1182 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
1183 | enum hrtimer_mode mode) |
1184 | { |
1185 | debug_init(timer, clock_id, mode); |
1186 | __hrtimer_init(timer, clock_id, mode); |
1187 | } |
1188 | EXPORT_SYMBOL_GPL(hrtimer_init); |
1189 | |
1190 | /** |
1191 | * hrtimer_get_res - get the timer resolution for a clock |
1192 | * @which_clock: which clock to query |
1193 | * @tp: pointer to timespec variable to store the resolution |
1194 | * |
1195 | * Store the resolution of the clock selected by @which_clock in the |
1196 | * variable pointed to by @tp. |
1197 | */ |
1198 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) |
1199 | { |
1200 | struct hrtimer_cpu_base *cpu_base; |
1201 | int base = hrtimer_clockid_to_base(which_clock); |
1202 | |
1203 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); |
1204 | *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution); |
1205 | |
1206 | return 0; |
1207 | } |
1208 | EXPORT_SYMBOL_GPL(hrtimer_get_res); |
1209 | |
1210 | static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) |
1211 | { |
1212 | struct hrtimer_clock_base *base = timer->base; |
1213 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; |
1214 | enum hrtimer_restart (*fn)(struct hrtimer *); |
1215 | int restart; |
1216 | |
1217 | WARN_ON(!irqs_disabled()); |
1218 | |
1219 | debug_deactivate(timer); |
1220 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); |
1221 | timer_stats_account_hrtimer(timer); |
1222 | fn = timer->function; |
1223 | |
1224 | /* |
1225 | * Because we run timers from hardirq context, there is no chance |
1226 | * they get migrated to another cpu, therefore it's safe to unlock |
1227 | * the timer base. |
1228 | */ |
1229 | raw_spin_unlock(&cpu_base->lock); |
1230 | trace_hrtimer_expire_entry(timer, now); |
1231 | restart = fn(timer); |
1232 | trace_hrtimer_expire_exit(timer); |
1233 | raw_spin_lock(&cpu_base->lock); |
1234 | |
1235 | /* |
1236 | * Note: We clear the CALLBACK bit after enqueue_hrtimer and |
1237 | * we do not reprogram the event hardware. Happens either in |
1238 | * hrtimer_start_range_ns() or in hrtimer_interrupt() |
1239 | */ |
1240 | if (restart != HRTIMER_NORESTART) { |
1241 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
1242 | enqueue_hrtimer(timer, base); |
1243 | } |
1244 | |
1245 | WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK)); |
1246 | |
1247 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
1248 | } |
1249 | |
1250 | #ifdef CONFIG_HIGH_RES_TIMERS |
1251 | |
1252 | /* |
1253 | * High resolution timer interrupt |
1254 | * Called with interrupts disabled |
1255 | */ |
1256 | void hrtimer_interrupt(struct clock_event_device *dev) |
1257 | { |
1258 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1259 | ktime_t expires_next, now, entry_time, delta; |
1260 | int i, retries = 0; |
1261 | |
1262 | BUG_ON(!cpu_base->hres_active); |
1263 | cpu_base->nr_events++; |
1264 | dev->next_event.tv64 = KTIME_MAX; |
1265 | |
1266 | raw_spin_lock(&cpu_base->lock); |
1267 | entry_time = now = hrtimer_update_base(cpu_base); |
1268 | retry: |
1269 | expires_next.tv64 = KTIME_MAX; |
1270 | /* |
1271 | * We set expires_next to KTIME_MAX here with cpu_base->lock |
1272 | * held to prevent that a timer is enqueued in our queue via |
1273 | * the migration code. This does not affect enqueueing of |
1274 | * timers which run their callback and need to be requeued on |
1275 | * this CPU. |
1276 | */ |
1277 | cpu_base->expires_next.tv64 = KTIME_MAX; |
1278 | |
1279 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1280 | struct hrtimer_clock_base *base; |
1281 | struct timerqueue_node *node; |
1282 | ktime_t basenow; |
1283 | |
1284 | if (!(cpu_base->active_bases & (1 << i))) |
1285 | continue; |
1286 | |
1287 | base = cpu_base->clock_base + i; |
1288 | basenow = ktime_add(now, base->offset); |
1289 | |
1290 | while ((node = timerqueue_getnext(&base->active))) { |
1291 | struct hrtimer *timer; |
1292 | |
1293 | timer = container_of(node, struct hrtimer, node); |
1294 | |
1295 | /* |
1296 | * The immediate goal for using the softexpires is |
1297 | * minimizing wakeups, not running timers at the |
1298 | * earliest interrupt after their soft expiration. |
1299 | * This allows us to avoid using a Priority Search |
1300 | * Tree, which can answer a stabbing query for |
1301 | * overlapping intervals and instead use the simple |
1302 | * BST we already have. |
1303 | * We don't add extra wakeups by delaying timers that |
1304 | * are right-of a not yet expired timer, because that |
1305 | * timer will have to trigger a wakeup anyway. |
1306 | */ |
1307 | |
1308 | if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { |
1309 | ktime_t expires; |
1310 | |
1311 | expires = ktime_sub(hrtimer_get_expires(timer), |
1312 | base->offset); |
1313 | if (expires.tv64 < expires_next.tv64) |
1314 | expires_next = expires; |
1315 | break; |
1316 | } |
1317 | |
1318 | __run_hrtimer(timer, &basenow); |
1319 | } |
1320 | } |
1321 | |
1322 | /* |
1323 | * Store the new expiry value so the migration code can verify |
1324 | * against it. |
1325 | */ |
1326 | cpu_base->expires_next = expires_next; |
1327 | raw_spin_unlock(&cpu_base->lock); |
1328 | |
1329 | /* Reprogramming necessary ? */ |
1330 | if (expires_next.tv64 == KTIME_MAX || |
1331 | !tick_program_event(expires_next, 0)) { |
1332 | cpu_base->hang_detected = 0; |
1333 | return; |
1334 | } |
1335 | |
1336 | /* |
1337 | * The next timer was already expired due to: |
1338 | * - tracing |
1339 | * - long lasting callbacks |
1340 | * - being scheduled away when running in a VM |
1341 | * |
1342 | * We need to prevent that we loop forever in the hrtimer |
1343 | * interrupt routine. We give it 3 attempts to avoid |
1344 | * overreacting on some spurious event. |
1345 | * |
1346 | * Acquire base lock for updating the offsets and retrieving |
1347 | * the current time. |
1348 | */ |
1349 | raw_spin_lock(&cpu_base->lock); |
1350 | now = hrtimer_update_base(cpu_base); |
1351 | cpu_base->nr_retries++; |
1352 | if (++retries < 3) |
1353 | goto retry; |
1354 | /* |
1355 | * Give the system a chance to do something else than looping |
1356 | * here. We stored the entry time, so we know exactly how long |
1357 | * we spent here. We schedule the next event this amount of |
1358 | * time away. |
1359 | */ |
1360 | cpu_base->nr_hangs++; |
1361 | cpu_base->hang_detected = 1; |
1362 | raw_spin_unlock(&cpu_base->lock); |
1363 | delta = ktime_sub(now, entry_time); |
1364 | if (delta.tv64 > cpu_base->max_hang_time.tv64) |
1365 | cpu_base->max_hang_time = delta; |
1366 | /* |
1367 | * Limit it to a sensible value as we enforce a longer |
1368 | * delay. Give the CPU at least 100ms to catch up. |
1369 | */ |
1370 | if (delta.tv64 > 100 * NSEC_PER_MSEC) |
1371 | expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); |
1372 | else |
1373 | expires_next = ktime_add(now, delta); |
1374 | tick_program_event(expires_next, 1); |
1375 | printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", |
1376 | ktime_to_ns(delta)); |
1377 | } |
1378 | |
1379 | /* |
1380 | * local version of hrtimer_peek_ahead_timers() called with interrupts |
1381 | * disabled. |
1382 | */ |
1383 | static void __hrtimer_peek_ahead_timers(void) |
1384 | { |
1385 | struct tick_device *td; |
1386 | |
1387 | if (!hrtimer_hres_active()) |
1388 | return; |
1389 | |
1390 | td = &__get_cpu_var(tick_cpu_device); |
1391 | if (td && td->evtdev) |
1392 | hrtimer_interrupt(td->evtdev); |
1393 | } |
1394 | |
1395 | /** |
1396 | * hrtimer_peek_ahead_timers -- run soft-expired timers now |
1397 | * |
1398 | * hrtimer_peek_ahead_timers will peek at the timer queue of |
1399 | * the current cpu and check if there are any timers for which |
1400 | * the soft expires time has passed. If any such timers exist, |
1401 | * they are run immediately and then removed from the timer queue. |
1402 | * |
1403 | */ |
1404 | void hrtimer_peek_ahead_timers(void) |
1405 | { |
1406 | unsigned long flags; |
1407 | |
1408 | local_irq_save(flags); |
1409 | __hrtimer_peek_ahead_timers(); |
1410 | local_irq_restore(flags); |
1411 | } |
1412 | |
1413 | static void run_hrtimer_softirq(struct softirq_action *h) |
1414 | { |
1415 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1416 | |
1417 | if (cpu_base->clock_was_set) { |
1418 | cpu_base->clock_was_set = 0; |
1419 | clock_was_set(); |
1420 | } |
1421 | |
1422 | hrtimer_peek_ahead_timers(); |
1423 | } |
1424 | |
1425 | #else /* CONFIG_HIGH_RES_TIMERS */ |
1426 | |
1427 | static inline void __hrtimer_peek_ahead_timers(void) { } |
1428 | |
1429 | #endif /* !CONFIG_HIGH_RES_TIMERS */ |
1430 | |
1431 | /* |
1432 | * Called from timer softirq every jiffy, expire hrtimers: |
1433 | * |
1434 | * For HRT it's the fallback code to run the softirq in the timer |
1435 | * softirq context in case the hrtimer initialization failed or has |
1436 | * not been done yet. |
1437 | */ |
1438 | void hrtimer_run_pending(void) |
1439 | { |
1440 | if (hrtimer_hres_active()) |
1441 | return; |
1442 | |
1443 | /* |
1444 | * This _is_ ugly: We have to check in the softirq context, |
1445 | * whether we can switch to highres and / or nohz mode. The |
1446 | * clocksource switch happens in the timer interrupt with |
1447 | * xtime_lock held. Notification from there only sets the |
1448 | * check bit in the tick_oneshot code, otherwise we might |
1449 | * deadlock vs. xtime_lock. |
1450 | */ |
1451 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) |
1452 | hrtimer_switch_to_hres(); |
1453 | } |
1454 | |
1455 | /* |
1456 | * Called from hardirq context every jiffy |
1457 | */ |
1458 | void hrtimer_run_queues(void) |
1459 | { |
1460 | struct timerqueue_node *node; |
1461 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1462 | struct hrtimer_clock_base *base; |
1463 | int index, gettime = 1; |
1464 | |
1465 | if (hrtimer_hres_active()) |
1466 | return; |
1467 | |
1468 | for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { |
1469 | base = &cpu_base->clock_base[index]; |
1470 | if (!timerqueue_getnext(&base->active)) |
1471 | continue; |
1472 | |
1473 | if (gettime) { |
1474 | hrtimer_get_softirq_time(cpu_base); |
1475 | gettime = 0; |
1476 | } |
1477 | |
1478 | raw_spin_lock(&cpu_base->lock); |
1479 | |
1480 | while ((node = timerqueue_getnext(&base->active))) { |
1481 | struct hrtimer *timer; |
1482 | |
1483 | timer = container_of(node, struct hrtimer, node); |
1484 | if (base->softirq_time.tv64 <= |
1485 | hrtimer_get_expires_tv64(timer)) |
1486 | break; |
1487 | |
1488 | __run_hrtimer(timer, &base->softirq_time); |
1489 | } |
1490 | raw_spin_unlock(&cpu_base->lock); |
1491 | } |
1492 | } |
1493 | |
1494 | /* |
1495 | * Sleep related functions: |
1496 | */ |
1497 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) |
1498 | { |
1499 | struct hrtimer_sleeper *t = |
1500 | container_of(timer, struct hrtimer_sleeper, timer); |
1501 | struct task_struct *task = t->task; |
1502 | |
1503 | t->task = NULL; |
1504 | if (task) |
1505 | wake_up_process(task); |
1506 | |
1507 | return HRTIMER_NORESTART; |
1508 | } |
1509 | |
1510 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) |
1511 | { |
1512 | sl->timer.function = hrtimer_wakeup; |
1513 | sl->task = task; |
1514 | } |
1515 | EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); |
1516 | |
1517 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) |
1518 | { |
1519 | hrtimer_init_sleeper(t, current); |
1520 | |
1521 | do { |
1522 | set_current_state(TASK_INTERRUPTIBLE); |
1523 | hrtimer_start_expires(&t->timer, mode); |
1524 | if (!hrtimer_active(&t->timer)) |
1525 | t->task = NULL; |
1526 | |
1527 | if (likely(t->task)) |
1528 | schedule(); |
1529 | |
1530 | hrtimer_cancel(&t->timer); |
1531 | mode = HRTIMER_MODE_ABS; |
1532 | |
1533 | } while (t->task && !signal_pending(current)); |
1534 | |
1535 | __set_current_state(TASK_RUNNING); |
1536 | |
1537 | return t->task == NULL; |
1538 | } |
1539 | |
1540 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) |
1541 | { |
1542 | struct timespec rmt; |
1543 | ktime_t rem; |
1544 | |
1545 | rem = hrtimer_expires_remaining(timer); |
1546 | if (rem.tv64 <= 0) |
1547 | return 0; |
1548 | rmt = ktime_to_timespec(rem); |
1549 | |
1550 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) |
1551 | return -EFAULT; |
1552 | |
1553 | return 1; |
1554 | } |
1555 | |
1556 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) |
1557 | { |
1558 | struct hrtimer_sleeper t; |
1559 | struct timespec __user *rmtp; |
1560 | int ret = 0; |
1561 | |
1562 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, |
1563 | HRTIMER_MODE_ABS); |
1564 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); |
1565 | |
1566 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) |
1567 | goto out; |
1568 | |
1569 | rmtp = restart->nanosleep.rmtp; |
1570 | if (rmtp) { |
1571 | ret = update_rmtp(&t.timer, rmtp); |
1572 | if (ret <= 0) |
1573 | goto out; |
1574 | } |
1575 | |
1576 | /* The other values in restart are already filled in */ |
1577 | ret = -ERESTART_RESTARTBLOCK; |
1578 | out: |
1579 | destroy_hrtimer_on_stack(&t.timer); |
1580 | return ret; |
1581 | } |
1582 | |
1583 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, |
1584 | const enum hrtimer_mode mode, const clockid_t clockid) |
1585 | { |
1586 | struct restart_block *restart; |
1587 | struct hrtimer_sleeper t; |
1588 | int ret = 0; |
1589 | unsigned long slack; |
1590 | |
1591 | slack = current->timer_slack_ns; |
1592 | if (rt_task(current)) |
1593 | slack = 0; |
1594 | |
1595 | hrtimer_init_on_stack(&t.timer, clockid, mode); |
1596 | hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); |
1597 | if (do_nanosleep(&t, mode)) |
1598 | goto out; |
1599 | |
1600 | /* Absolute timers do not update the rmtp value and restart: */ |
1601 | if (mode == HRTIMER_MODE_ABS) { |
1602 | ret = -ERESTARTNOHAND; |
1603 | goto out; |
1604 | } |
1605 | |
1606 | if (rmtp) { |
1607 | ret = update_rmtp(&t.timer, rmtp); |
1608 | if (ret <= 0) |
1609 | goto out; |
1610 | } |
1611 | |
1612 | restart = &current_thread_info()->restart_block; |
1613 | restart->fn = hrtimer_nanosleep_restart; |
1614 | restart->nanosleep.clockid = t.timer.base->clockid; |
1615 | restart->nanosleep.rmtp = rmtp; |
1616 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
1617 | |
1618 | ret = -ERESTART_RESTARTBLOCK; |
1619 | out: |
1620 | destroy_hrtimer_on_stack(&t.timer); |
1621 | return ret; |
1622 | } |
1623 | |
1624 | SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, |
1625 | struct timespec __user *, rmtp) |
1626 | { |
1627 | struct timespec tu; |
1628 | |
1629 | if (copy_from_user(&tu, rqtp, sizeof(tu))) |
1630 | return -EFAULT; |
1631 | |
1632 | if (!timespec_valid(&tu)) |
1633 | return -EINVAL; |
1634 | |
1635 | return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
1636 | } |
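/*
 * Illustrative user space view of the syscall above (hypothetical
 * snippet, compiled separately against libc): a plain nanosleep() call
 * ends up here and is backed by an hrtimer_sleeper on CLOCK_MONOTONIC.
 *
 *	struct timespec req = { .tv_sec = 0, .tv_nsec = 500000 };
 *	nanosleep(&req, NULL);		sleeps roughly 500 us
 */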
1637 | |
1638 | /* |
1639 | * Functions related to boot-time initialization: |
1640 | */ |
1641 | static void __cpuinit init_hrtimers_cpu(int cpu) |
1642 | { |
1643 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
1644 | int i; |
1645 | |
1646 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1647 | cpu_base->clock_base[i].cpu_base = cpu_base; |
1648 | timerqueue_init_head(&cpu_base->clock_base[i].active); |
1649 | } |
1650 | |
1651 | hrtimer_init_hres(cpu_base); |
1652 | } |
1653 | |
1654 | #ifdef CONFIG_HOTPLUG_CPU |
1655 | |
1656 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
1657 | struct hrtimer_clock_base *new_base) |
1658 | { |
1659 | struct hrtimer *timer; |
1660 | struct timerqueue_node *node; |
1661 | |
1662 | while ((node = timerqueue_getnext(&old_base->active))) { |
1663 | timer = container_of(node, struct hrtimer, node); |
1664 | BUG_ON(hrtimer_callback_running(timer)); |
1665 | debug_deactivate(timer); |
1666 | |
1667 | /* |
1668 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the |
1669 | * timer could be seen as !active and just vanish away |
1670 | * under us on another CPU |
1671 | */ |
1672 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); |
1673 | timer->base = new_base; |
1674 | /* |
1675 | * Enqueue the timers on the new cpu. This does not |
1676 | * reprogram the event device in case the timer |
1677 | * expires before the earliest on this CPU, but we run |
1678 | * hrtimer_interrupt after we migrated everything to |
1679 | * sort out already expired timers and reprogram the |
1680 | * event device. |
1681 | */ |
1682 | enqueue_hrtimer(timer, new_base); |
1683 | |
1684 | /* Clear the migration state bit */ |
1685 | timer->state &= ~HRTIMER_STATE_MIGRATE; |
1686 | } |
1687 | } |
1688 | |
1689 | static void migrate_hrtimers(int scpu) |
1690 | { |
1691 | struct hrtimer_cpu_base *old_base, *new_base; |
1692 | int i; |
1693 | |
1694 | BUG_ON(cpu_online(scpu)); |
1695 | tick_cancel_sched_timer(scpu); |
1696 | |
1697 | local_irq_disable(); |
1698 | old_base = &per_cpu(hrtimer_bases, scpu); |
1699 | new_base = &__get_cpu_var(hrtimer_bases); |
1700 | /* |
1701 | * The caller is globally serialized and nobody else |
1702 | * takes two locks at once, deadlock is not possible. |
1703 | */ |
1704 | raw_spin_lock(&new_base->lock); |
1705 | raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
1706 | |
1707 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1708 | migrate_hrtimer_list(&old_base->clock_base[i], |
1709 | &new_base->clock_base[i]); |
1710 | } |
1711 | |
1712 | raw_spin_unlock(&old_base->lock); |
1713 | raw_spin_unlock(&new_base->lock); |
1714 | |
1715 | /* Check, if we got expired work to do */ |
1716 | __hrtimer_peek_ahead_timers(); |
1717 | local_irq_enable(); |
1718 | } |
1719 | |
1720 | #endif /* CONFIG_HOTPLUG_CPU */ |
1721 | |
1722 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, |
1723 | unsigned long action, void *hcpu) |
1724 | { |
1725 | int scpu = (long)hcpu; |
1726 | |
1727 | switch (action) { |
1728 | |
1729 | case CPU_UP_PREPARE: |
1730 | case CPU_UP_PREPARE_FROZEN: |
1731 | init_hrtimers_cpu(scpu); |
1732 | break; |
1733 | |
1734 | #ifdef CONFIG_HOTPLUG_CPU |
1735 | case CPU_DYING: |
1736 | case CPU_DYING_FROZEN: |
1737 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu); |
1738 | break; |
1739 | case CPU_DEAD: |
1740 | case CPU_DEAD_FROZEN: |
1741 | { |
1742 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu); |
1743 | migrate_hrtimers(scpu); |
1744 | break; |
1745 | } |
1746 | #endif |
1747 | |
1748 | default: |
1749 | break; |
1750 | } |
1751 | |
1752 | return NOTIFY_OK; |
1753 | } |
1754 | |
1755 | static struct notifier_block __cpuinitdata hrtimers_nb = { |
1756 | .notifier_call = hrtimer_cpu_notify, |
1757 | }; |
1758 | |
1759 | void __init hrtimers_init(void) |
1760 | { |
1761 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, |
1762 | (void *)(long)smp_processor_id()); |
1763 | register_cpu_notifier(&hrtimers_nb); |
1764 | #ifdef CONFIG_HIGH_RES_TIMERS |
1765 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); |
1766 | #endif |
1767 | } |
1768 | |
1769 | /** |
1770 | * schedule_hrtimeout_range_clock - sleep until timeout |
1771 | * @expires: timeout value (ktime_t) |
1772 | * @delta: slack in expires timeout (ktime_t) |
1773 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
1774 | * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME |
1775 | */ |
1776 | int __sched |
1777 | schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, |
1778 | const enum hrtimer_mode mode, int clock) |
1779 | { |
1780 | struct hrtimer_sleeper t; |
1781 | |
1782 | /* |
1783 | * Optimize when a zero timeout value is given. It does not |
1784 | * matter whether this is an absolute or a relative time. |
1785 | */ |
1786 | if (expires && !expires->tv64) { |
1787 | __set_current_state(TASK_RUNNING); |
1788 | return 0; |
1789 | } |
1790 | |
1791 | /* |
1792 | * A NULL parameter means "infinite" |
1793 | */ |
1794 | if (!expires) { |
1795 | schedule(); |
1796 | __set_current_state(TASK_RUNNING); |
1797 | return -EINTR; |
1798 | } |
1799 | |
1800 | hrtimer_init_on_stack(&t.timer, clock, mode); |
1801 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); |
1802 | |
1803 | hrtimer_init_sleeper(&t, current); |
1804 | |
1805 | hrtimer_start_expires(&t.timer, mode); |
1806 | if (!hrtimer_active(&t.timer)) |
1807 | t.task = NULL; |
1808 | |
1809 | if (likely(t.task)) |
1810 | schedule(); |
1811 | |
1812 | hrtimer_cancel(&t.timer); |
1813 | destroy_hrtimer_on_stack(&t.timer); |
1814 | |
1815 | __set_current_state(TASK_RUNNING); |
1816 | |
1817 | return !t.task ? 0 : -EINTR; |
1818 | } |
1819 | |
1820 | /** |
1821 | * schedule_hrtimeout_range - sleep until timeout |
1822 | * @expires: timeout value (ktime_t) |
1823 | * @delta: slack in expires timeout (ktime_t) |
1824 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
1825 | * |
1826 | * Make the current task sleep until the given expiry time has |
1827 | * elapsed. The routine will return immediately unless |
1828 | * the current task state has been set (see set_current_state()). |
1829 | * |
1830 | * The @delta argument gives the kernel the freedom to schedule the |
1831 | * actual wakeup to a time that is both power and performance friendly. |
1832 | * The kernel gives the normal best-effort behavior for "@expires+@delta", |
1833 | * but may decide to fire the timer earlier, though never before @expires. |
1834 | * |
1835 | * You can set the task state as follows - |
1836 | * |
1837 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to |
1838 | * pass before the routine returns. |
1839 | * |
1840 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is |
1841 | * delivered to the current task. |
1842 | * |
1843 | * The current task state is guaranteed to be TASK_RUNNING when this |
1844 | * routine returns. |
1845 | * |
1846 | * Returns 0 when the timer has expired otherwise -EINTR |
1847 | */ |
1848 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, |
1849 | const enum hrtimer_mode mode) |
1850 | { |
1851 | return schedule_hrtimeout_range_clock(expires, delta, mode, |
1852 | CLOCK_MONOTONIC); |
1853 | } |
1854 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); |
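/*
 * Illustrative sketch of a caller (hypothetical my_short_sleep, values
 * arbitrary): set the task state first, then sleep about 2 ms with
 * 100 us of slack, relative to now.
 */
static int my_short_sleep(void)
{
	ktime_t timeout = ns_to_ktime(2 * NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range(&timeout, 100 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}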
1855 | |
1856 | /** |
1857 | * schedule_hrtimeout - sleep until timeout |
1858 | * @expires: timeout value (ktime_t) |
1859 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
1860 | * |
1861 | * Make the current task sleep until the given expiry time has |
1862 | * elapsed. The routine will return immediately unless |
1863 | * the current task state has been set (see set_current_state()). |
1864 | * |
1865 | * You can set the task state as follows - |
1866 | * |
1867 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to |
1868 | * pass before the routine returns. |
1869 | * |
1870 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is |
1871 | * delivered to the current task. |
1872 | * |
1873 | * The current task state is guaranteed to be TASK_RUNNING when this |
1874 | * routine returns. |
1875 | * |
1876 | * Returns 0 when the timer has expired otherwise -EINTR |
1877 | */ |
1878 | int __sched schedule_hrtimeout(ktime_t *expires, |
1879 | const enum hrtimer_mode mode) |
1880 | { |
1881 | return schedule_hrtimeout_range(expires, 0, mode); |
1882 | } |
1883 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |
1884 |