/*
 * linux/kernel/panic.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>

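/*
 * Step size (in ms) for the panic busy-wait loops, and the divisor that
 * sets the blink interval (3600/PANIC_BLINK_SPD = 200 ms per toggle).
 */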
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;
EXPORT_SYMBOL_GPL(panic_timeout);

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

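/* Default blink handler: does nothing and reports zero delay. */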
static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
	static DEFINE_SPINLOCK(panic_lock);
	static char buf[1024];
	va_list args;
	long i, i_next = 0;
	int state = 0;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after the panic_lock is acquired) from invoking panic again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 */
	if (!spin_trylock(&panic_lock))
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * Do we want to call this before we try to display a message?
	 */
	crash_kexec(NULL);

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	kmsg_dump(KMSG_DUMP_PANIC);

	bust_spinlocks(0);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);

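		/*
		 * Spin in PANIC_TIMER_STEP (100 ms) chunks, toggling the
		 * blink state roughly every 3600/PANIC_BLINK_SPD = 200 ms.
		 */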
		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
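	/* A negative panic_timeout skips the countdown and reboots immediately. */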
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

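/*
 * Maps a TAINT_* bit to the character printed when the bit is set
 * ("true") or clear ("false").
 */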
struct tnt {
	u8 bit;
	char true;
	char false;
};

static const struct tnt tnts[] = {
	{ TAINT_PROPRIETARY_MODULE, 'P', 'G' },
	{ TAINT_FORCED_MODULE, 'F', ' ' },
	{ TAINT_UNSAFE_SMP, 'S', ' ' },
	{ TAINT_FORCED_RMMOD, 'R', ' ' },
	{ TAINT_MACHINE_CHECK, 'M', ' ' },
	{ TAINT_BAD_PAGE, 'B', ' ' },
	{ TAINT_USER, 'U', ' ' },
	{ TAINT_DIE, 'D', ' ' },
	{ TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
	{ TAINT_WARN, 'W', ' ' },
	{ TAINT_CRAP, 'C', ' ' },
	{ TAINT_FIRMWARE_WORKAROUND, 'I', ' ' },
	{ TAINT_OOT_MODULE, 'O', ' ' },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * 'P' - Proprietary module has been loaded.
 * 'F' - Module has been forcibly loaded.
 * 'S' - SMP with CPUs not designed for SMP.
 * 'R' - User forced a module unload.
 * 'M' - System experienced a machine check exception.
 * 'B' - System has hit bad_page.
 * 'U' - Userspace-defined naughtiness.
 * 'D' - Kernel has oopsed before.
 * 'A' - ACPI table overridden.
 * 'W' - Taint on warning.
 * 'C' - modules from drivers/staging are loaded.
 * 'I' - Working around severe firmware bug.
 * 'O' - Out-of-tree module has been loaded.
 *
 * The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ")];

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
			const struct tnt *t = &tnts[i];
			*s++ = test_bit(t->bit, &tainted_mask) ?
					t->true : t->false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

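/* Nonzero if the given TAINT_* flag is set. */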
int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

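/* Return the raw taint mask. */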
unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		printk(KERN_WARNING
			"Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);

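/*
 * Busy-wait for @msecs milliseconds, poking the NMI watchdog so it
 * does not trigger while we deliberately spin.
 */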
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy.
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

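/*
 * Seed oops_id with random bytes on first use; later calls just bump it
 * so successive reports carry distinct IDs.
 */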
static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

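/* Emit the "end trace" line that closes an oops or warning report. */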
void print_oops_end_marker(void)
{
	init_oops_id();
	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
		(unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

#ifdef WANT_WARN_ON_SLOWPATH
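/*
 * Bundles a printf-style format and its va_list so the WARN() variants
 * below can share warn_slowpath_common().
 */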
struct slowpath_args {
	const char *fmt;
	va_list args;
};

static void warn_slowpath_common(const char *file, int line, void *caller,
				 unsigned taint, struct slowpath_args *args)
{
	disable_trace_on_warning();

	pr_warn("------------[ cut here ]------------\n");
	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
		raw_smp_processor_id(), current->pid, file, line, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();
	dump_stack();
	print_oops_end_marker();
	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     taint, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

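/*
 * Tunable at runtime via /sys/module/kernel/parameters/ and at boot via
 * the "panic=" and "pause_on_oops=" command-line options.
 */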
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);

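/* "oops=panic" on the command line makes any oops fatal. */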
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);