/*
 *  linux/kernel/time/tick-sched.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize last_jiffies_update if necessary and return it.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	cpumask_clear_cpu(cpu, nohz_cpu_mask);
	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

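/*
 * Account the idle time that elapsed since ts->idle_entrytime into
 * ts->idle_sleeptime and mark the cpu as no longer idle.
 */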
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_lastupdate = now;
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

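/*
 * Record the idle entry time. If a previous idle period is still
 * marked active, fold its elapsed time into ts->idle_sleeptime first.
 */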
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now, delta;

	now = ktime_get();
	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}
	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

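/**
 * get_cpu_idle_time_us - get the cumulative idle time of a cpu
 * @cpu: the cpu to query
 * @last_update_time: filled with the timestamp (in us) of the last
 *	idle time update
 *
 * Returns the cumulative idle time of @cpu in microseconds, or -1 if
 * NOHZ is disabled.
 */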
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (!tick_nohz_enabled)
		return -1;

	if (ts->idle_active)
		*last_update_time = ktime_to_us(ts->idle_lastupdate);
	else
		*last_update_time = ktime_to_us(ktime_get());

	return ktime_to_us(ts->idle_sleeptime);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * The call to tick_nohz_start_idle() stops last_update_time from
	 * being updated. Thus, it must not be made when we are called
	 * from irq_exit() and the prior state was not idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick if we are only one tick off
	 * or if the cpu is required for RCU.
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * Calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogramming the event if its expiry is unchanged */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick() can be called several times before
		 * nohz_restart_sched_tick() is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick().
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpumask_clear_cpu(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}

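/*
 * Re-arm the tick timer from the expiry saved in ts->idle_tick. If the
 * expiry is already in the past, update jiffies and retry with the
 * next period.
 */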
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpumask_clear_cpu(cpu, nohz_cpu_mask);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only 1-tick accounting. Make sure
	 * that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}

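/*
 * Advance the tick timer by one period and program the clock event
 * device. Returns nonzero when the event could not be programmed
 * because the expiry was already in the past.
 */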
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
	       smp_processor_id());
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different from tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device when the next expiry is either
	 * already reached or less than or equal to the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

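/*
 * Called on interrupt entry: close a pending idle-time accounting
 * period and, if the tick is stopped, update jiffies and kick the
 * tick timer.
 */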
static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call update_process_times() when we are not in irq
	 * context and have no valid regs pointer.
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
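	/*
	 * Stagger the per-cpu tick timers across the first half of the
	 * tick period, so that the ticks of all CPUs do not fire at the
	 * same instant.
	 */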
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, num_possible_cpus());
	offset *= smp_processor_id();
	hrtimer_add_expires_ns(&ts->sched_timer, offset);

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
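/*
 * Cancel the tick emulation timer of a cpu (typically when the cpu is
 * shut down) and mark its nohz mode inactive.
 */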
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/*
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/*
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}