/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a fixed factor: the soft threshold is twice the
 * hard one.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
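
/*
 * Worked example with the defaults above: watchdog_thresh = 10, so the
 * hard-lockup window is 10 seconds and the soft-lockup window is 20.
 */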

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
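
/*
 * Note: each unit of the shifted clock is 2^30 ns ~= 1.074 s, so these
 * "seconds" run about 7% slow.  Thresholds expressed in them are thus
 * slightly more lenient in wall-clock time, which is fine for lockup
 * detection.
 */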

static unsigned long get_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.  The divide by 5
         * gives the hrtimer several chances (two or three with the
         * current relation between the soft and hard thresholds) to
         * increment before the hardlockup detector generates a warning.
         */
        return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}
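
/*
 * With the defaults this is 20 s * (NSEC_PER_SEC / 5) = 4e9 ns: the
 * per-cpu hrtimer fires every 4 seconds, i.e. five samples per
 * soft-lockup window and two or three per hard-lockup window.
 */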

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        int this_cpu = smp_processor_id();

        __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        if (watchdog_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
                        if (per_cpu(watchdog_nmi_touch, cpu) != true)
                                per_cpu(watchdog_nmi_touch, cpu) = true;
                }
        }
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
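
/*
 * The check above works because two clocks tick at very different rates:
 * with the defaults the hrtimer increments hrtimer_interrupts every 4
 * seconds, while the perf NMI that ends up calling is_hardlockup() fires
 * roughly every 10 seconds, so two or three increments are expected
 * between consecutive NMIs.  A count that has not moved since the last
 * NMI means timer interrupts are no longer being serviced on this cpu.
 */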
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp(smp_processor_id());

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
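
/*
 * Illustrative numbers: if the watchdog thread last touched the stamp at
 * touch_ts = 100 and now = 125, then with the default 20 second threshold
 * time_after(125, 120) is true and is_softlockup() returns a 25 second
 * stall duration, which the timer callback prints in its warning.
 */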

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
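
/*
 * .sample_period is deliberately left unset here; it is computed from
 * watchdog_thresh at enable time in watchdog_nmi_enable() below.
 */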

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing.  The timer interrupt should have fired multiple
         * times before this counter overflowed.  If it hasn't, that is a
         * good indication the cpu is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure the high priority
         * watchdog task is being scheduled.  The task touches the
         * watchdog stamp to indicate it is getting cpu time.  If it
         * hasn't, that is a good indication some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
        struct sched_param param = { .sched_priority = 0 };
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* initialize timestamp */
        __touch_watchdog();

        /*
         * Kick off the timer for the hardlockup detector.  This is done
         * here because hrtimer_start can only pin to smp_processor_id().
         */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly (kicked by the hrtimer callback function) once per
         * sample period (4 seconds by default) to reset the softlockup
         * timestamp.  If this gets delayed for more than
         * 2*watchdog_thresh seconds then the debug-printout triggers in
         * watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        /*
         * Drop the policy/priority elevation during thread exit to avoid a
         * scheduling latency spike.
         */
        __set_current_state(TASK_RUNNING);
        sched_setscheduler(current, SCHED_NORMAL, &param);
        return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int watchdog_nmi_enable(int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
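        /*
         * hw_nmi_get_sample_period() is arch code that converts
         * watchdog_thresh seconds into the equivalent number of cpu
         * cycles, so the counter overflows (and the NMI fires) about
         * once per hard-lockup window.
         */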
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                 watchdog_overflow_callback, NULL);
        if (!IS_ERR(event)) {
                pr_info("enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                           cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;
}

static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        int err = 0;

        /* enable the perf event */
        err = watchdog_nmi_enable(cpu);

        /* Regardless of err above, fall through and start softlockup */

        /* create the watchdog thread */
        if (!p) {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu),
                                           "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        pr_err("softlockup watchdog for %i failed\n", cpu);
                        if (!err) {
                                /* if hardlockup hasn't already set this */
                                err = PTR_ERR(p);
                                /* and disable the perf event */
                                watchdog_nmi_disable(cpu);
                        }
                        goto out;
                }
                sched_setscheduler(p, SCHED_FIFO, &param);
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

out:
        return err;
}

static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
        int cpu;

        watchdog_enabled = 0;

        for_each_online_cpu(cpu)
                if (!watchdog_enable(cpu))
                        /*
                         * if any cpu succeeds, watchdog is considered
                         * enabled for the system
                         */
                        watchdog_enabled = 1;

        if (!watchdog_enabled)
                pr_err("failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                watchdog_disable(cpu);

        /* if all watchdogs are disabled, then they are disabled for the system */
        watchdog_enabled = 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */
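
/*
 * Illustrative usage (paths per the comment above):
 *
 *   echo 0 > /proc/sys/kernel/nmi_watchdog    # stop watchdogs on all cpus
 *   echo 1 > /proc/sys/kernel/nmi_watchdog    # start them again
 *
 * Writing either file lands here; a non-zero watchdog_enabled combined
 * with a non-zero watchdog_thresh (re)enables on every online cpu.
 */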

int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret || !write)
                goto out;

        if (watchdog_enabled && watchdog_thresh)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

out:
        return ret;
}
#endif /* CONFIG_SYSCTL */

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enabled)
                        watchdog_enable(hotcpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                watchdog_disable(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }

        /*
         * hardlockup and softlockup are not important enough
         * to block cpu bring up.  Just always succeed and
         * rely on printk output to flag problems.
         */
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(notifier_to_errno(err));

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        return;
}