/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * This code detects hard lockups: incidents where the kernel, on a CPU,
 * does not respond to anything except NMIs.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
int watchdog_enabled;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static int __read_mostly did_panic;
static int __initdata no_watchdog;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
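/*
 * Example: booting with "nmi_watchdog=panic" takes the strncmp() branch
 * above, so a detected hard lockup panics the box instead of only
 * emitting a WARN (see watchdog_overflow_callback() below).
 */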
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
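/*
 * Example: "softlockup_panic=1" on the command line parses via
 * simple_strtoul() above and turns a soft-lockup warning into a panic;
 * the default comes from CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE.
 */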

static int __init nowatchdog_setup(char *str)
{
        no_watchdog = 1;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        no_watchdog = 1;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */
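/*
 * Example: booting with "nowatchdog" (or the deprecated "nosoftlockup")
 * sets no_watchdog, and spawn_watchdog_task() below then returns early,
 * leaving both detectors disabled.
 */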


/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
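/*
 * Worked example: cpu_clock() returns nanoseconds, so after ~90 seconds
 * of uptime 90e9 >> 30 == 83 -- units of ~1.074s each, close enough to
 * seconds for lockup detection without a 64-bit divide.
 */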

static unsigned long get_sample_period(void)
{
        /*
         * Convert softlockup_thresh from seconds to ns.
         * The divide by 5 gives the hrtimer five chances to
         * increment before the hardlockup detector generates
         * a warning.
         */
        return softlockup_thresh / 5 * NSEC_PER_SEC;
}
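/*
 * Worked example: with the default softlockup_thresh of 60 the period is
 * 60/5 * NSEC_PER_SEC = 12 seconds, i.e. five hrtimer firings per
 * softlockup window.  Note the integer division: a thresh below 5 would
 * compute a period of 0.
 */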

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        int this_cpu = smp_processor_id();

        __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done locklessly.  A 0 racing with a fresh timestamp is
         * harmless: it only means the softlockup check starts one cycle
         * later.
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        if (watchdog_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
                        if (per_cpu(watchdog_nmi_touch, cpu) != true)
                                per_cpu(watchdog_nmi_touch, cpu) = true;
                }
        }
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
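/*
 * Illustrative (hypothetical) caller: code that legitimately monopolizes
 * a CPU for a long time, such as a slow console dumping state, keeps
 * both detectors quiet by touching the watchdog from its loop:
 *
 *	while (have_more_output())	(hypothetical helper)
 *		touch_nmi_watchdog();
 */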

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __get_cpu_var(hrtimer_interrupts);

        if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
                return 1;

        __get_cpu_var(hrtimer_interrupts_saved) = hrint;
        return 0;
}
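/*
 * Example timeline: the hrtimer bumps hrtimer_interrupts once per sample
 * period, and the perf NMI arrives slowly enough that the hrtimer gets
 * about five chances to fire in between (see get_sample_period()).  If
 * two consecutive NMIs read the same count, the hrtimer never ran for an
 * entire NMI interval: interrupts are off or the CPU is wedged.
 */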
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp(smp_processor_id());

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + softlockup_thresh))
                return now - touch_ts;

        return 0;
}
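/*
 * Example: both values are in the ~second units of get_timestamp(), so
 * with softlockup_thresh == 60 a watchdog thread that last touched its
 * timestamp at t=100 makes this return a nonzero duration once the
 * clock passes t=160.
 */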

static int
watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
        did_panic = 1;

        return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
        .notifier_call = watchdog_panic,
};

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__get_cpu_var(watchdog_nmi_touch) == true) {
                __get_cpu_var(watchdog_nmi_touch) = false;
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing.  The timer interrupt should have fired multiple
         * times before this counter overflowed; if it hasn't, that is a
         * good indication the CPU is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__get_cpu_var(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __get_cpu_var(hard_watchdog_warn) = true;
                return;
        }

        __get_cpu_var(hard_watchdog_warn) = false;
        return;
}

static void watchdog_interrupt_count(void)
{
        __get_cpu_var(hrtimer_interrupts)++;
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__get_cpu_var(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __get_cpu_var(softlockup_touch_sync) = false;
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure a high-priority task is
         * being scheduled.  The task touches the watchdog to indicate it
         * is getting CPU time.  If it hasn't, that is a good indication
         * some task is hogging the CPU.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
                if (__get_cpu_var(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __get_cpu_var(soft_watchdog_warn) = true;
        } else
                __get_cpu_var(soft_watchdog_warn) = false;

        return HRTIMER_RESTART;
}


/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        __touch_watchdog();

        /*
         * Kick off the timer for the hardlockup detector.  This is done
         * here because hrtimer_start can only pin to smp_processor_id().
         */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly once per sample period to reset the softlockup
         * timestamp.  If this gets delayed for more than softlockup_thresh
         * seconds then the debug-printout triggers in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}
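/*
 * Sketch of the per-CPU handshake each sample period:
 *
 *	watchdog_timer_fn() (hrtimer irq)	watchdog thread
 *	  watchdog_interrupt_count()
 *	  wake_up_process()  ------------->	__touch_watchdog()
 *	  hrtimer_forward_now()			schedule() (sleeps again)
 *
 * Setting TASK_INTERRUPTIBLE before the kthread_should_stop() check in
 * the loop above ensures a wakeup arriving in that window just leaves
 * the task runnable rather than being lost across schedule().
 */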


#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int watchdog_nmi_enable(int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
        event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }

        printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
        return -1;

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        return 0;
}

static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);

        /* enable the perf event */
        if (watchdog_nmi_enable(cpu) != 0)
                return -1;

        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
                        return -1;
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

        /* if any cpu succeeds, watchdog is considered enabled for the system */
        watchdog_enabled = 1;

        return 0;
}

static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}

static void watchdog_enable_all_cpus(void)
{
        int cpu;
        int result = 0;

        for_each_online_cpu(cpu)
                result += watchdog_enable(cpu);

        if (result)
                printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                watchdog_disable(cpu);

        /* if all watchdogs are disabled, then they are disabled for the system */
        watchdog_enabled = 0;
}


/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */

int proc_dowatchdog_enabled(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();
        return 0;
}
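/*
 * Example (per the comment above): "echo 0 > /proc/sys/kernel/nmi_watchdog"
 * lands here with write set; proc_dointvec() clears watchdog_enabled and
 * watchdog_disable_all_cpus() then tears the timers, threads and perf
 * events down.
 */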

int proc_dowatchdog_thresh(struct ctl_table *table, int write,
                           void __user *buffer,
                           size_t *lenp, loff_t *ppos)
{
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif /* CONFIG_SYSCTL */


/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (watchdog_prepare_cpu(hotcpu))
                        return NOTIFY_BAD;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enable(hotcpu))
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static int __init spawn_watchdog_task(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        if (no_watchdog)
                return 0;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(err == NOTIFY_BAD);

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

        return 0;
}
early_initcall(spawn_watchdog_task);