1 | /* |
2 | * kernel/power/hibernate.c - Hibernation (a.k.a. suspend-to-disk) support. |
3 | * |
4 | * Copyright (c) 2003 Patrick Mochel |
5 | * Copyright (c) 2003 Open Source Development Lab |
6 | * Copyright (c) 2004 Pavel Machek <pavel@ucw.cz> |
7 | * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc. |
8 | * |
9 | * This file is released under the GPLv2. |
10 | */ |
11 | |
12 | #include <linux/suspend.h> |
13 | #include <linux/syscalls.h> |
14 | #include <linux/reboot.h> |
15 | #include <linux/string.h> |
16 | #include <linux/device.h> |
17 | #include <linux/kmod.h> |
18 | #include <linux/delay.h> |
19 | #include <linux/fs.h> |
20 | #include <linux/mount.h> |
21 | #include <linux/pm.h> |
22 | #include <linux/console.h> |
23 | #include <linux/cpu.h> |
24 | #include <linux/freezer.h> |
25 | #include <linux/gfp.h> |
26 | #include <scsi/scsi_scan.h> |
27 | #include <asm/suspend.h> |
28 | |
29 | #include "power.h" |
30 | |
31 | |
32 | static int nocompress = 0; |
33 | static int noresume = 0; |
34 | static char resume_file[256] = CONFIG_PM_STD_PARTITION; |
35 | dev_t swsusp_resume_device; |
36 | sector_t swsusp_resume_block; |
37 | int in_suspend __nosavedata = 0; |
38 | |
39 | enum { |
40 | HIBERNATION_INVALID, |
41 | HIBERNATION_PLATFORM, |
42 | HIBERNATION_TEST, |
43 | HIBERNATION_TESTPROC, |
44 | HIBERNATION_SHUTDOWN, |
45 | HIBERNATION_REBOOT, |
46 | /* keep last */ |
47 | __HIBERNATION_AFTER_LAST |
48 | }; |
49 | #define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1) |
50 | #define HIBERNATION_FIRST (HIBERNATION_INVALID + 1) |
51 | |
52 | static int hibernation_mode = HIBERNATION_SHUTDOWN; |
53 | |
54 | static const struct platform_hibernation_ops *hibernation_ops; |
55 | |
56 | /** |
57 | * hibernation_set_ops - set the global hibernate operations |
58 | * @ops: the hibernation operations to use in subsequent hibernation transitions |
59 | */ |
60 | |
61 | void hibernation_set_ops(const struct platform_hibernation_ops *ops) |
62 | { |
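| /* |
| * Reject an ops set that is missing any of the mandatory callbacks |
| * (only ->recover is optional). |
| */ |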
63 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot |
64 | && ops->prepare && ops->finish && ops->enter && ops->pre_restore |
65 | && ops->restore_cleanup && ops->leave)) { |
66 | WARN_ON(1); |
67 | return; |
68 | } |
69 | mutex_lock(&pm_mutex); |
70 | hibernation_ops = ops; |
71 | if (ops) |
72 | hibernation_mode = HIBERNATION_PLATFORM; |
73 | else if (hibernation_mode == HIBERNATION_PLATFORM) |
74 | hibernation_mode = HIBERNATION_SHUTDOWN; |
75 | |
76 | mutex_unlock(&pm_mutex); |
77 | } |
78 | |
79 | static bool entering_platform_hibernation; |
80 | |
81 | bool system_entering_hibernation(void) |
82 | { |
83 | return entering_platform_hibernation; |
84 | } |
85 | EXPORT_SYMBOL(system_entering_hibernation); |
86 | |
87 | #ifdef CONFIG_PM_DEBUG |
88 | static void hibernation_debug_sleep(void) |
89 | { |
90 | printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n"); |
91 | mdelay(5000); |
92 | } |
93 | |
94 | static int hibernation_testmode(int mode) |
95 | { |
96 | if (hibernation_mode == mode) { |
97 | hibernation_debug_sleep(); |
98 | return 1; |
99 | } |
100 | return 0; |
101 | } |
102 | |
103 | static int hibernation_test(int level) |
104 | { |
105 | if (pm_test_level == level) { |
106 | hibernation_debug_sleep(); |
107 | return 1; |
108 | } |
109 | return 0; |
110 | } |
111 | #else /* !CONFIG_PM_DEBUG */ |
112 | static int hibernation_testmode(int mode) { return 0; } |
113 | static int hibernation_test(int level) { return 0; } |
114 | #endif /* !CONFIG_PM_DEBUG */ |
115 | |
116 | /** |
117 | * platform_begin - tell the platform driver that we're starting |
118 | * hibernation |
119 | */ |
120 | |
121 | static int platform_begin(int platform_mode) |
122 | { |
123 | return (platform_mode && hibernation_ops) ? |
124 | hibernation_ops->begin() : 0; |
125 | } |
126 | |
127 | /** |
128 | * platform_end - tell the platform driver that we've entered the |
129 | * working state |
130 | */ |
131 | |
132 | static void platform_end(int platform_mode) |
133 | { |
134 | if (platform_mode && hibernation_ops) |
135 | hibernation_ops->end(); |
136 | } |
137 | |
138 | /** |
139 | * platform_pre_snapshot - prepare the machine for hibernation using the |
140 | * platform driver if so configured and return an error code if it fails |
141 | */ |
142 | |
143 | static int platform_pre_snapshot(int platform_mode) |
144 | { |
145 | return (platform_mode && hibernation_ops) ? |
146 | hibernation_ops->pre_snapshot() : 0; |
147 | } |
148 | |
149 | /** |
150 | * platform_leave - prepare the machine for switching to the normal mode |
151 | * of operation using the platform driver (called with interrupts disabled) |
152 | */ |
153 | |
154 | static void platform_leave(int platform_mode) |
155 | { |
156 | if (platform_mode && hibernation_ops) |
157 | hibernation_ops->leave(); |
158 | } |
159 | |
160 | /** |
161 | * platform_finish - switch the machine to the normal mode of operation |
162 | * using the platform driver (must be called after platform_prepare()) |
163 | */ |
164 | |
165 | static void platform_finish(int platform_mode) |
166 | { |
167 | if (platform_mode && hibernation_ops) |
168 | hibernation_ops->finish(); |
169 | } |
170 | |
171 | /** |
172 | * platform_pre_restore - prepare the platform for the restoration from a |
173 | * hibernation image. If the restore fails after this function has been |
174 | * called, platform_restore_cleanup() must be called. |
175 | */ |
176 | |
177 | static int platform_pre_restore(int platform_mode) |
178 | { |
179 | return (platform_mode && hibernation_ops) ? |
180 | hibernation_ops->pre_restore() : 0; |
181 | } |
182 | |
183 | /** |
184 | * platform_restore_cleanup - switch the platform to the normal mode of |
185 | * operation after a failing restore. If platform_pre_restore() has been |
186 | * called before the failing restore, this function must be called too, |
187 | * regardless of the result of platform_pre_restore(). |
188 | */ |
189 | |
190 | static void platform_restore_cleanup(int platform_mode) |
191 | { |
192 | if (platform_mode && hibernation_ops) |
193 | hibernation_ops->restore_cleanup(); |
194 | } |
195 | |
196 | /** |
197 | * platform_recover - recover the platform from a failure to suspend |
198 | * devices. |
199 | */ |
200 | |
201 | static void platform_recover(int platform_mode) |
202 | { |
203 | if (platform_mode && hibernation_ops && hibernation_ops->recover) |
204 | hibernation_ops->recover(); |
205 | } |
206 | |
207 | /** |
208 | * swsusp_show_speed - print the time elapsed between two events. |
209 | * @start: Starting event. |
210 | * @stop: Final event. |
211 | * @nr_pages: Number of pages processed between @start and @stop. |
212 | * @msg: Introductory message to print. |
213 | */ |
214 | |
215 | void swsusp_show_speed(struct timeval *start, struct timeval *stop, |
216 | unsigned nr_pages, char *msg) |
217 | { |
218 | s64 elapsed_centisecs64; |
219 | int centisecs; |
220 | int k; |
221 | int kps; |
222 | |
223 | elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); |
224 | do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); |
225 | centisecs = elapsed_centisecs64; |
226 | if (centisecs == 0) |
227 | centisecs = 1; /* avoid div-by-zero */ |
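| /* |
| * Convert pages to kilobytes and compute throughput in KB/s; e.g. |
| * 25000 pages of 4 KiB in 2.00 s (200 centisecs) gives k = 100000 and |
| * kps = 100000 * 100 / 200 = 50000, printed as |
| * "100000 kbytes in 2.00 seconds (50.00 MB/s)". |
| */ |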
228 | k = nr_pages * (PAGE_SIZE / 1024); |
229 | kps = (k * 100) / centisecs; |
230 | printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", |
231 | msg, k, |
232 | centisecs / 100, centisecs % 100, |
233 | kps / 1000, (kps % 1000) / 10); |
234 | } |
235 | |
236 | /** |
237 | * create_image - freeze devices that need to be frozen with interrupts |
238 | * off, create the hibernation image and thaw those devices. Control |
239 | * reappears in this routine after a restore. |
240 | */ |
241 | |
242 | static int create_image(int platform_mode) |
243 | { |
244 | int error; |
245 | |
246 | error = arch_prepare_suspend(); |
247 | if (error) |
248 | return error; |
249 | |
250 | /* At this point, dpm_suspend_start() has been called, but *not* |
251 | * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now. |
252 | * Otherwise, drivers for some devices (e.g. interrupt controllers) |
253 | * become desynchronized with the actual state of the hardware |
254 | * at resume time, and evil weirdness ensues. |
255 | */ |
256 | error = dpm_suspend_noirq(PMSG_FREEZE); |
257 | if (error) { |
258 | printk(KERN_ERR "PM: Some devices failed to power down, " |
259 | "aborting hibernation\n"); |
260 | return error; |
261 | } |
262 | |
263 | error = platform_pre_snapshot(platform_mode); |
264 | if (error || hibernation_test(TEST_PLATFORM)) |
265 | goto Platform_finish; |
266 | |
267 | error = disable_nonboot_cpus(); |
268 | if (error || hibernation_test(TEST_CPUS) |
269 | || hibernation_testmode(HIBERNATION_TEST)) |
270 | goto Enable_cpus; |
271 | |
272 | local_irq_disable(); |
273 | |
274 | error = sysdev_suspend(PMSG_FREEZE); |
275 | if (error) { |
276 | printk(KERN_ERR "PM: Some system devices failed to power down, " |
277 | "aborting hibernation\n"); |
278 | goto Enable_irqs; |
279 | } |
280 | |
281 | if (hibernation_test(TEST_CORE) || pm_wakeup_pending()) |
282 | goto Power_up; |
283 | |
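| /* |
| * in_suspend lives in the __nosavedata section, so it is not part of the |
| * image: after a successful restore it still reads 0, which lets the code |
| * below tell "image just created" apart from "just restored". |
| */ |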
284 | in_suspend = 1; |
285 | save_processor_state(); |
286 | error = swsusp_arch_suspend(); |
287 | if (error) |
288 | printk(KERN_ERR "PM: Error %d creating hibernation image\n", |
289 | error); |
290 | /* Restore control flow magically appears here */ |
291 | restore_processor_state(); |
292 | if (!in_suspend) { |
293 | events_check_enabled = false; |
294 | platform_leave(platform_mode); |
295 | } |
296 | |
297 | Power_up: |
298 | sysdev_resume(); |
299 | /* NOTE: dpm_resume_noirq() is just a resume() for devices |
300 | * that suspended with irqs off ... no overall powerup. |
301 | */ |
302 | |
303 | Enable_irqs: |
304 | local_irq_enable(); |
305 | |
306 | Enable_cpus: |
307 | enable_nonboot_cpus(); |
308 | |
309 | Platform_finish: |
310 | platform_finish(platform_mode); |
311 | |
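| /* |
| * Pick the right resume message: THAW after creating the image (or |
| * RECOVER if that failed), RESTORE when we get here after being restored. |
| */ |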
312 | dpm_resume_noirq(in_suspend ? |
313 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
314 | |
315 | return error; |
316 | } |
317 | |
318 | /** |
319 | * hibernation_snapshot - quiesce devices and create the hibernation |
320 | * snapshot image. |
321 | * @platform_mode: if set, use the platform driver, if available, to |
322 | * prepare the platform firmware for the power transition. |
323 | * |
324 | * Must be called with pm_mutex held |
325 | */ |
326 | |
327 | int hibernation_snapshot(int platform_mode) |
328 | { |
329 | int error; |
330 | |
331 | error = platform_begin(platform_mode); |
332 | if (error) |
333 | goto Close; |
334 | |
335 | /* Preallocate image memory before shutting down devices. */ |
336 | error = hibernate_preallocate_memory(); |
337 | if (error) |
338 | goto Close; |
339 | |
340 | suspend_console(); |
341 | pm_restrict_gfp_mask(); |
342 | error = dpm_suspend_start(PMSG_FREEZE); |
343 | if (error) |
344 | goto Recover_platform; |
345 | |
346 | if (hibernation_test(TEST_DEVICES)) |
347 | goto Recover_platform; |
348 | |
349 | error = create_image(platform_mode); |
350 | /* |
351 | * Control returns here (1) after the image has been created or the |
352 | * image creation has failed and (2) after a successful restore. |
353 | */ |
354 | |
355 | Resume_devices: |
356 | /* We may need to release the preallocated image pages here. */ |
357 | if (error || !in_suspend) |
358 | swsusp_free(); |
359 | |
360 | dpm_resume_end(in_suspend ? |
361 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
362 | |
363 | if (error || !in_suspend) |
364 | pm_restore_gfp_mask(); |
365 | |
366 | resume_console(); |
367 | Close: |
368 | platform_end(platform_mode); |
369 | return error; |
370 | |
371 | Recover_platform: |
372 | platform_recover(platform_mode); |
373 | goto Resume_devices; |
374 | } |
375 | |
376 | /** |
377 | * resume_target_kernel - prepare devices that need to be suspended with |
378 | * interrupts off, restore the contents of highmem that have not been |
379 | * restored yet from the image and run the low level code that will restore |
380 | * the remaining contents of memory and switch to the just restored target |
381 | * kernel. |
382 | */ |
383 | |
384 | static int resume_target_kernel(bool platform_mode) |
385 | { |
386 | int error; |
387 | |
388 | error = dpm_suspend_noirq(PMSG_QUIESCE); |
389 | if (error) { |
390 | printk(KERN_ERR "PM: Some devices failed to power down, " |
391 | "aborting resume\n"); |
392 | return error; |
393 | } |
394 | |
395 | error = platform_pre_restore(platform_mode); |
396 | if (error) |
397 | goto Cleanup; |
398 | |
399 | error = disable_nonboot_cpus(); |
400 | if (error) |
401 | goto Enable_cpus; |
402 | |
403 | local_irq_disable(); |
404 | |
405 | error = sysdev_suspend(PMSG_QUIESCE); |
406 | if (error) |
407 | goto Enable_irqs; |
408 | |
409 | /* We'll ignore saved state, but this gets preempt count (etc) right */ |
410 | save_processor_state(); |
411 | error = restore_highmem(); |
412 | if (!error) { |
413 | error = swsusp_arch_resume(); |
414 | /* |
415 | * The code below is only ever reached in case of a failure. |
416 | * Otherwise execution continues at place where |
417 | * swsusp_arch_suspend() was called |
418 | */ |
419 | BUG_ON(!error); |
420 | /* This call to restore_highmem() undoes the previous one */ |
421 | restore_highmem(); |
422 | } |
423 | /* |
424 | * The only reason why swsusp_arch_resume() can fail is memory being |
425 | * very tight, so we have to free it as soon as we can to avoid |
426 | * subsequent failures |
427 | */ |
428 | swsusp_free(); |
429 | restore_processor_state(); |
430 | touch_softlockup_watchdog(); |
431 | |
432 | sysdev_resume(); |
433 | |
434 | Enable_irqs: |
435 | local_irq_enable(); |
436 | |
437 | Enable_cpus: |
438 | enable_nonboot_cpus(); |
439 | |
440 | Cleanup: |
441 | platform_restore_cleanup(platform_mode); |
442 | |
443 | dpm_resume_noirq(PMSG_RECOVER); |
444 | |
445 | return error; |
446 | } |
447 | |
448 | /** |
449 | * hibernation_restore - quiesce devices and restore the hibernation |
450 | * snapshot image. If successful, control returns in hibernation_snapshot(). |
451 | * @platform_mode: if set, use the platform driver, if available, to |
452 | * prepare the platform firmware for the transition. |
453 | * |
454 | * Must be called with pm_mutex held |
455 | */ |
456 | |
457 | int hibernation_restore(int platform_mode) |
458 | { |
459 | int error; |
460 | |
461 | pm_prepare_console(); |
462 | suspend_console(); |
463 | pm_restrict_gfp_mask(); |
464 | error = dpm_suspend_start(PMSG_QUIESCE); |
465 | if (!error) { |
466 | error = resume_target_kernel(platform_mode); |
467 | dpm_resume_end(PMSG_RECOVER); |
468 | } |
469 | pm_restore_gfp_mask(); |
470 | resume_console(); |
471 | pm_restore_console(); |
472 | return error; |
473 | } |
474 | |
475 | /** |
476 | * hibernation_platform_enter - enter the hibernation state using the |
477 | * platform driver (if available) |
478 | */ |
479 | |
480 | int hibernation_platform_enter(void) |
481 | { |
482 | int error; |
483 | |
484 | if (!hibernation_ops) |
485 | return -ENOSYS; |
486 | |
487 | /* |
488 | * We have cancelled the power transition by running |
489 | * hibernation_ops->finish() before saving the image, so we should let |
490 | * the firmware know that we're going to enter the sleep state after all |
491 | */ |
492 | error = hibernation_ops->begin(); |
493 | if (error) |
494 | goto Close; |
495 | |
496 | entering_platform_hibernation = true; |
497 | suspend_console(); |
498 | error = dpm_suspend_start(PMSG_HIBERNATE); |
499 | if (error) { |
500 | if (hibernation_ops->recover) |
501 | hibernation_ops->recover(); |
502 | goto Resume_devices; |
503 | } |
504 | |
505 | error = dpm_suspend_noirq(PMSG_HIBERNATE); |
506 | if (error) |
507 | goto Resume_devices; |
508 | |
509 | error = hibernation_ops->prepare(); |
510 | if (error) |
511 | goto Platform_finish; |
512 | |
513 | error = disable_nonboot_cpus(); |
514 | if (error) |
515 | goto Platform_finish; |
516 | |
517 | local_irq_disable(); |
518 | sysdev_suspend(PMSG_HIBERNATE); |
519 | if (pm_wakeup_pending()) { |
520 | error = -EAGAIN; |
521 | goto Power_up; |
522 | } |
523 | |
524 | hibernation_ops->enter(); |
525 | /* We should never get here */ |
526 | while (1); |
527 | |
528 | Power_up: |
529 | sysdev_resume(); |
530 | local_irq_enable(); |
531 | enable_nonboot_cpus(); |
532 | |
533 | Platform_finish: |
534 | hibernation_ops->finish(); |
535 | |
536 | dpm_resume_noirq(PMSG_RESTORE); |
537 | |
538 | Resume_devices: |
539 | entering_platform_hibernation = false; |
540 | dpm_resume_end(PMSG_RESTORE); |
541 | resume_console(); |
542 | |
543 | Close: |
544 | hibernation_ops->end(); |
545 | |
546 | return error; |
547 | } |
548 | |
549 | /** |
550 | * power_down - Shut the machine down for hibernation. |
551 | * |
552 | * Use the platform driver, if configured so; otherwise try |
553 | * to power off or reboot. |
554 | */ |
555 | |
556 | static void power_down(void) |
557 | { |
558 | switch (hibernation_mode) { |
559 | case HIBERNATION_TEST: |
560 | case HIBERNATION_TESTPROC: |
561 | break; |
562 | case HIBERNATION_REBOOT: |
563 | kernel_restart(NULL); |
564 | break; |
565 | case HIBERNATION_PLATFORM: |
566 | hibernation_platform_enter(); |
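| /* Fall through: if the platform driver could not enter the sleep |
| state, power off instead. */ |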
567 | case HIBERNATION_SHUTDOWN: |
568 | kernel_power_off(); |
569 | break; |
570 | } |
571 | kernel_halt(); |
572 | /* |
573 | * A valid image is on the disk; if we continue we risk serious data |
574 | * corruption after resume. |
575 | */ |
576 | printk(KERN_CRIT "PM: Please power down manually\n"); |
577 | while (1); |
578 | } |
579 | |
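| /* |
| * Freeze user space and freezable kernel threads; if that fails, thaw |
| * whatever was already frozen before reporting the error. |
| */ |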
580 | static int prepare_processes(void) |
581 | { |
582 | int error = 0; |
583 | |
584 | if (freeze_processes()) { |
585 | error = -EBUSY; |
586 | thaw_processes(); |
587 | } |
588 | return error; |
589 | } |
590 | |
591 | /** |
592 | * hibernate - The grandpappy of the built-in hibernation management |
593 | */ |
594 | |
595 | int hibernate(void) |
596 | { |
597 | int error; |
598 | |
599 | mutex_lock(&pm_mutex); |
600 | /* The snapshot device should not be opened while we're running */ |
601 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
602 | error = -EBUSY; |
603 | goto Unlock; |
604 | } |
605 | |
606 | pm_prepare_console(); |
607 | error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); |
608 | if (error) |
609 | goto Exit; |
610 | |
611 | error = usermodehelper_disable(); |
612 | if (error) |
613 | goto Exit; |
614 | |
615 | /* Allocate memory management structures */ |
616 | error = create_basic_memory_bitmaps(); |
617 | if (error) |
618 | goto Exit; |
619 | |
620 | printk(KERN_INFO "PM: Syncing filesystems ... "); |
621 | sys_sync(); |
622 | printk("done.\n"); |
623 | |
624 | error = prepare_processes(); |
625 | if (error) |
626 | goto Finish; |
627 | |
628 | if (hibernation_test(TEST_FREEZER)) |
629 | goto Thaw; |
630 | |
631 | if (hibernation_testmode(HIBERNATION_TESTPROC)) |
632 | goto Thaw; |
633 | |
634 | error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); |
635 | if (error) |
636 | goto Thaw; |
637 | |
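| /* |
| * A nonzero in_suspend means the snapshot was just created and should |
| * now be written out; zero means we have just resumed from an image. |
| */ |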
638 | if (in_suspend) { |
639 | unsigned int flags = 0; |
640 | |
641 | if (hibernation_mode == HIBERNATION_PLATFORM) |
642 | flags |= SF_PLATFORM_MODE; |
643 | if (nocompress) |
644 | flags |= SF_NOCOMPRESS_MODE; |
645 | pr_debug("PM: writing image.\n"); |
646 | error = swsusp_write(flags); |
647 | swsusp_free(); |
648 | if (!error) |
649 | power_down(); |
650 | in_suspend = 0; |
651 | pm_restore_gfp_mask(); |
652 | } else { |
653 | pr_debug("PM: Image restored successfully.\n"); |
654 | } |
655 | |
656 | Thaw: |
657 | thaw_processes(); |
658 | Finish: |
659 | free_basic_memory_bitmaps(); |
660 | usermodehelper_enable(); |
661 | Exit: |
662 | pm_notifier_call_chain(PM_POST_HIBERNATION); |
663 | pm_restore_console(); |
664 | atomic_inc(&snapshot_device_available); |
665 | Unlock: |
666 | mutex_unlock(&pm_mutex); |
667 | return error; |
668 | } |
669 | |
670 | |
671 | /** |
672 | * software_resume - Resume from a saved image. |
673 | * |
674 | * Called as a late_initcall (so all devices are discovered and |
675 | * initialized), we call swsusp to see if we have a saved image or not. |
676 | * If so, we quiesce devices, then restore the saved image. We will |
677 | * return above (in hibernate()) if everything goes well. |
678 | * Otherwise, we fail gracefully and return to the normally |
679 | * scheduled program. |
680 | * |
681 | */ |
682 | |
683 | static int software_resume(void) |
684 | { |
685 | int error; |
686 | unsigned int flags; |
687 | |
688 | /* |
689 | * If the user said "noresume", bail out early. |
690 | */ |
691 | if (noresume) |
692 | return 0; |
693 | |
694 | /* |
695 | * name_to_dev_t() below takes a sysfs buffer mutex when sysfs |
696 | * is configured into the kernel. Since the regular hibernate |
697 | * trigger path is via sysfs which takes a buffer mutex before |
698 | * calling hibernate functions (which take pm_mutex) this can |
699 | * cause lockdep to complain about a possible ABBA deadlock |
700 | * which cannot happen since we're in the boot code here and |
701 | * sysfs can't be invoked yet. Therefore, we use a subclass |
702 | * here to avoid lockdep complaining. |
703 | */ |
704 | mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING); |
705 | |
706 | if (swsusp_resume_device) |
707 | goto Check_image; |
708 | |
709 | if (!strlen(resume_file)) { |
710 | error = -ENOENT; |
711 | goto Unlock; |
712 | } |
713 | |
714 | pr_debug("PM: Checking hibernation image partition %s\n", resume_file); |
715 | |
716 | /* Check if the device is there */ |
717 | swsusp_resume_device = name_to_dev_t(resume_file); |
718 | if (!swsusp_resume_device) { |
719 | /* |
720 | * Some device discovery might still be in progress; we need |
721 | * to wait for this to finish. |
722 | */ |
723 | wait_for_device_probe(); |
724 | /* |
725 | * We can't depend on SCSI devices being available after loading |
726 | * one of their modules until scsi_complete_async_scans() is |
727 | * called and the resume device usually is a SCSI one. |
728 | */ |
729 | scsi_complete_async_scans(); |
730 | |
731 | swsusp_resume_device = name_to_dev_t(resume_file); |
732 | if (!swsusp_resume_device) { |
733 | error = -ENODEV; |
734 | goto Unlock; |
735 | } |
736 | } |
737 | |
738 | Check_image: |
739 | pr_debug("PM: Hibernation image partition %d:%d present\n", |
740 | MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); |
741 | |
742 | pr_debug("PM: Looking for hibernation image.\n"); |
743 | error = swsusp_check(); |
744 | if (error) |
745 | goto Unlock; |
746 | |
747 | /* The snapshot device should not be opened while we're running */ |
748 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
749 | error = -EBUSY; |
750 | swsusp_close(FMODE_READ); |
751 | goto Unlock; |
752 | } |
753 | |
754 | pm_prepare_console(); |
755 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); |
756 | if (error) |
757 | goto close_finish; |
758 | |
759 | error = usermodehelper_disable(); |
760 | if (error) |
761 | goto close_finish; |
762 | |
763 | error = create_basic_memory_bitmaps(); |
764 | if (error) |
765 | goto close_finish; |
766 | |
767 | pr_debug("PM: Preparing processes for restore.\n"); |
768 | error = prepare_processes(); |
769 | if (error) { |
770 | swsusp_close(FMODE_READ); |
771 | goto Done; |
772 | } |
773 | |
774 | pr_debug("PM: Loading hibernation image.\n"); |
775 | |
776 | error = swsusp_read(&flags); |
777 | swsusp_close(FMODE_READ); |
778 | if (!error) |
779 | hibernation_restore(flags & SF_PLATFORM_MODE); |
780 | |
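| /* |
| * A successful restore never returns here; reaching this point means |
| * reading or restoring the image failed, so clean up and boot normally. |
| */ |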
781 | printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n"); |
782 | swsusp_free(); |
783 | thaw_processes(); |
784 | Done: |
785 | free_basic_memory_bitmaps(); |
786 | usermodehelper_enable(); |
787 | Finish: |
788 | pm_notifier_call_chain(PM_POST_RESTORE); |
789 | pm_restore_console(); |
790 | atomic_inc(&snapshot_device_available); |
791 | /* For success case, the suspend path will release the lock */ |
792 | Unlock: |
793 | mutex_unlock(&pm_mutex); |
794 | pr_debug("PM: Hibernation image not present or could not be loaded.\n"); |
795 | return error; |
796 | close_finish: |
797 | swsusp_close(FMODE_READ); |
798 | goto Finish; |
799 | } |
800 | |
801 | late_initcall(software_resume); |
802 | |
803 | |
804 | static const char * const hibernation_modes[] = { |
805 | [HIBERNATION_PLATFORM] = "platform", |
806 | [HIBERNATION_SHUTDOWN] = "shutdown", |
807 | [HIBERNATION_REBOOT] = "reboot", |
808 | [HIBERNATION_TEST] = "test", |
809 | [HIBERNATION_TESTPROC] = "testproc", |
810 | }; |
811 | |
812 | /** |
813 | * disk - Control hibernation mode |
814 | * |
815 | * Suspend-to-disk can be handled in several ways. We have a few options |
816 | * for putting the system to sleep - using the platform driver (e.g. ACPI |
817 | * or other hibernation_ops), powering off the system or rebooting the |
818 | * system (for testing) as well as the two test modes. |
819 | * |
820 | * The system can support 'platform', and that is known a priori (and |
821 | * encoded by the presence of hibernation_ops). However, the user may |
822 | choose 'shutdown' or 'reboot' as alternatives, as well as one of the |
823 | * test modes, 'test' or 'testproc'. |
824 | * |
825 | * show() will display what the mode is currently set to. |
826 | * store() will accept one of |
827 | * |
828 | * 'platform' |
829 | * 'shutdown' |
830 | * 'reboot' |
831 | * 'test' |
832 | * 'testproc' |
833 | * |
834 | * It will only change to 'platform' if the system |
835 | * supports it (as determined by having hibernation_ops). |
836 | */ |
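| /* |
| * Typical use from user space (the "state" file is handled in |
| * kernel/power/main.c): |
| * |
| *   # echo platform > /sys/power/disk |
| *   # echo disk > /sys/power/state |
| */ |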
837 | |
838 | static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, |
839 | char *buf) |
840 | { |
841 | int i; |
842 | char *start = buf; |
843 | |
844 | for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { |
845 | if (!hibernation_modes[i]) |
846 | continue; |
847 | switch (i) { |
848 | case HIBERNATION_SHUTDOWN: |
849 | case HIBERNATION_REBOOT: |
850 | case HIBERNATION_TEST: |
851 | case HIBERNATION_TESTPROC: |
852 | break; |
853 | case HIBERNATION_PLATFORM: |
854 | if (hibernation_ops) |
855 | break; |
856 | /* not a valid mode, continue with loop */ |
857 | continue; |
858 | } |
859 | if (i == hibernation_mode) |
860 | buf += sprintf(buf, "[%s] ", hibernation_modes[i]); |
861 | else |
862 | buf += sprintf(buf, "%s ", hibernation_modes[i]); |
863 | } |
864 | buf += sprintf(buf, "\n"); |
865 | return buf-start; |
866 | } |
867 | |
868 | |
869 | static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, |
870 | const char *buf, size_t n) |
871 | { |
872 | int error = 0; |
873 | int i; |
874 | int len; |
875 | char *p; |
876 | int mode = HIBERNATION_INVALID; |
877 | |
878 | p = memchr(buf, '\n', n); |
879 | len = p ? p - buf : n; |
880 | |
881 | mutex_lock(&pm_mutex); |
882 | for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { |
883 | if (len == strlen(hibernation_modes[i]) |
884 | && !strncmp(buf, hibernation_modes[i], len)) { |
885 | mode = i; |
886 | break; |
887 | } |
888 | } |
889 | if (mode != HIBERNATION_INVALID) { |
890 | switch (mode) { |
891 | case HIBERNATION_SHUTDOWN: |
892 | case HIBERNATION_REBOOT: |
893 | case HIBERNATION_TEST: |
894 | case HIBERNATION_TESTPROC: |
895 | hibernation_mode = mode; |
896 | break; |
897 | case HIBERNATION_PLATFORM: |
898 | if (hibernation_ops) |
899 | hibernation_mode = mode; |
900 | else |
901 | error = -EINVAL; |
902 | } |
903 | } else |
904 | error = -EINVAL; |
905 | |
906 | if (!error) |
907 | pr_debug("PM: Hibernation mode set to '%s'\n", |
908 | hibernation_modes[mode]); |
909 | mutex_unlock(&pm_mutex); |
910 | return error ? error : n; |
911 | } |
912 | |
913 | power_attr(disk); |
914 | |
915 | static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr, |
916 | char *buf) |
917 | { |
918 | return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device), |
919 | MINOR(swsusp_resume_device)); |
920 | } |
921 | |
922 | static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, |
923 | const char *buf, size_t n) |
924 | { |
925 | unsigned int maj, min; |
926 | dev_t res; |
927 | int ret = -EINVAL; |
928 | |
929 | if (sscanf(buf, "%u:%u", &maj, &min) != 2) |
930 | goto out; |
931 | |
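| /* Reject major:minor values that do not survive the MKDEV() round trip. */ |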
932 | res = MKDEV(maj, min); |
933 | if (maj != MAJOR(res) || min != MINOR(res)) |
934 | goto out; |
935 | |
936 | mutex_lock(&pm_mutex); |
937 | swsusp_resume_device = res; |
938 | mutex_unlock(&pm_mutex); |
939 | printk(KERN_INFO "PM: Starting manual resume from disk\n"); |
940 | noresume = 0; |
941 | software_resume(); |
942 | ret = n; |
943 | out: |
944 | return ret; |
945 | } |
946 | |
947 | power_attr(resume); |
948 | |
949 | static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr, |
950 | char *buf) |
951 | { |
952 | return sprintf(buf, "%lu\n", image_size); |
953 | } |
954 | |
955 | static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr, |
956 | const char *buf, size_t n) |
957 | { |
958 | unsigned long size; |
959 | |
960 | if (sscanf(buf, "%lu", &size) == 1) { |
961 | image_size = size; |
962 | return n; |
963 | } |
964 | |
965 | return -EINVAL; |
966 | } |
967 | |
968 | power_attr(image_size); |
969 | |
970 | static struct attribute * g[] = { |
971 | &disk_attr.attr, |
972 | &resume_attr.attr, |
973 | &image_size_attr.attr, |
974 | NULL, |
975 | }; |
976 | |
977 | |
978 | static struct attribute_group attr_group = { |
979 | .attrs = g, |
980 | }; |
981 | |
982 | |
983 | static int __init pm_disk_init(void) |
984 | { |
985 | return sysfs_create_group(power_kobj, &attr_group); |
986 | } |
987 | |
988 | core_initcall(pm_disk_init); |
989 | |
990 | |
991 | static int __init resume_setup(char *str) |
992 | { |
993 | if (noresume) |
994 | return 1; |
995 | |
996 | strncpy(resume_file, str, 255); |
997 | return 1; |
998 | } |
999 | |
1000 | static int __init resume_offset_setup(char *str) |
1001 | { |
1002 | unsigned long long offset; |
1003 | |
1004 | if (noresume) |
1005 | return 1; |
1006 | |
1007 | if (sscanf(str, "%llu", &offset) == 1) |
1008 | swsusp_resume_block = offset; |
1009 | |
1010 | return 1; |
1011 | } |
1012 | |
1013 | static int __init hibernate_setup(char *str) |
1014 | { |
1015 | if (!strncmp(str, "noresume", 8)) |
1016 | noresume = 1; |
1017 | else if (!strncmp(str, "nocompress", 10)) |
1018 | nocompress = 1; |
1019 | return 1; |
1020 | } |
1021 | |
1022 | static int __init noresume_setup(char *str) |
1023 | { |
1024 | noresume = 1; |
1025 | return 1; |
1026 | } |
1027 | |
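| /* |
| * Kernel command line examples: |
| * |
| *   resume=/dev/sda2      partition holding the hibernation image |
| *   resume_offset=<n>     offset of the swap header within that partition |
| *   noresume              ignore any saved image and boot normally |
| *   hibernate=nocompress  write the image without compression |
| */ |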
1028 | __setup("noresume", noresume_setup); |
1029 | __setup("resume_offset=", resume_offset_setup); |
1030 | __setup("resume=", resume_setup); |
1031 | __setup("hibernate=", hibernate_setup); |
1032 |