Source at commit 2604e7f9a98c27be50a0c3ff7503b6a5ea8f6cfe (Maarten ter Huurne, "cpufreq_stats: Support runtime changes to frequency table").

/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

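/*
 * How the two detectors fit together (summary of the code below): a
 * per-CPU hrtimer fires every get_sample_period() ns and wakes a
 * per-CPU SCHED_FIFO kthread, which refreshes watchdog_touch_ts.  If
 * that timestamp goes stale, the timer callback reports a soft lockup
 * (a task hogging the CPU).  With CONFIG_HARDLOCKUP_DETECTOR, a perf
 * cycle counter additionally raises an NMI and checks that
 * hrtimer_interrupts is still advancing; if it is not, interrupts
 * themselves are blocked: a hard lockup.
 */
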
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
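
/*
 * Example boot command lines accepted by the handlers above:
 *   nmi_watchdog=panic    panic on a hard lockup
 *   nmi_watchdog=nopanic  warn only
 *   nmi_watchdog=0        disable the watchdog entirely
 *   softlockup_panic=1    panic on a soft lockup
 *   nowatchdog            disable the watchdog (the deprecated
 *                         "nosoftlockup" now does the same thing)
 */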

/*
 * Hard-lockup warnings should be triggered after just a few seconds.
 * Soft-lockups can have false positives under extreme conditions, so we
 * generally want a higher threshold for soft lockups than for hard
 * lockups.  The two thresholds are therefore coupled by a factor: the
 * soft-lockup threshold is twice the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
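
/*
 * Worked example: cpu_clock() == 5,000,000,000 ns gives
 * 5000000000 >> 30 == 4, i.e. about four "seconds" of 1.074 s each.
 */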

static unsigned long get_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}
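
/*
 * Worked example with the default watchdog_thresh of 10:
 *   soft-lockup threshold:  10 * 2   = 20 s
 *   sample period:          20 s / 5 =  4 s
 * so the watchdog kthread normally runs five times within one
 * soft-lockup window, and the hrtimer count advances two or three
 * times within one hard-lockup window.
 */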

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
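
/*
 * The perf NMI fires roughly once per hard-lockup window (its period
 * comes from hw_nmi_get_sample_period(watchdog_thresh), below), while
 * the hrtimer fires two or three times in that window.  A hrtimer
 * count that has not moved between two NMIs therefore means timer
 * interrupts are no longer being serviced on this CPU.
 */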

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt is
	 * incrementing.  The timer interrupt should have fired multiple
	 * times before we overflowed.  If it hasn't, then this is a good
	 * indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
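
/*
 * hard_watchdog_warn latches the warning until is_hardlockup() stops
 * reporting, so a CPU that stays locked up produces one message (or
 * one panic) rather than one per NMI.
 */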
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* ... and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to indicate it
	 * is getting cpu time.  If it hasn't, then this is a good
	 * indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
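
/*
 * Note that every path above returns HRTIMER_RESTART after
 * hrtimer_forward_now() has pushed the expiry forward, which is what
 * keeps this one-shot hrtimer behaving as a periodic tick.
 */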

/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = 0 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly (kicked by the hrtimer callback function) once every
	 * sample period (4 seconds by default) to reset the softlockup
	 * timestamp.  If this gets delayed for more than
	 * 2*watchdog_thresh seconds then the debug-printout triggers in
	 * watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	/*
	 * Drop the policy/priority elevation during thread exit to avoid a
	 * scheduling latency spike.
	 */
	__set_current_state(TASK_RUNNING);
	sched_setscheduler(current, SCHED_NORMAL, &param);
	return 0;
}
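
/*
 * Note the sleep idiom above: TASK_INTERRUPTIBLE is set *before* the
 * kthread_should_stop() check and the call to schedule(), so a
 * wake_up_process() from watchdog_timer_fn() arriving in between
 * simply puts the task back to TASK_RUNNING instead of being lost.
 */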

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* print for cpu0, or for a later cpu whose result differs from cpu0's */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
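
/*
 * In perf_event_create_kernel_counter(attr, cpu, task, handler, context)
 * a NULL task binds the counter to the CPU rather than to a task, so
 * watchdog_overflow_callback() runs (in NMI context on x86) each time
 * the cycle counter reaches wd_attr->sample_period.
 */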

static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
}

static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err = 0;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);

	/* Regardless of err above, fall through and start softlockup */

	/* create the watchdog thread */
	if (!p) {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			pr_err("softlockup watchdog for %i failed\n", cpu);
			if (!err) {
				/* if hardlockup hasn't already set this */
				err = PTR_ERR(p);
				/* and disable the perf event */
				watchdog_nmi_disable(cpu);
			}
			goto out;
		}
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

out:
	return err;
}

static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	int cpu;

	watchdog_enabled = 0;

	for_each_online_cpu(cpu)
		if (!watchdog_enable(cpu))
			/* if any cpu succeeds, watchdog is considered
			   enabled for the system */
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		pr_err("failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto out;

	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

out:
	return ret;
}
#endif /* CONFIG_SYSCTL */
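
/*
 * Example from userspace:
 *   echo 0  > /proc/sys/kernel/nmi_watchdog    # stop all watchdogs
 *   echo 20 > /proc/sys/kernel/watchdog_thresh # 20 s hard, 40 s soft
 * Either write lands in proc_dowatchdog(), which then re-enables or
 * disables the watchdog on every online CPU.
 */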

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (watchdog_enabled)
			watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}

	/*
	 * A hardlockup or softlockup failure is not important enough to
	 * block CPU bring-up.  Always succeed and rely on printk output
	 * to flag problems.
	 */
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	return;
}
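
/*
 * lockup_detector_init() hand-feeds CPU_UP_PREPARE and CPU_ONLINE for
 * the boot CPU (hotplug notifications only fire for CPUs brought up
 * later) and then registers the notifier so every subsequent CPU gets
 * its watchdog set up the same way.
 */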