kernel/kexec.c

/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
    .name = "Crash kernel",
    .start = 0,
    .end = 0,
    .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
    if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
        return 1;
    return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size is given
 * by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single page of
 * memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
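
/*
 * Illustrative shape of that descriptor list (a sketch for
 * orientation, not data produced by this file; the IND_* flags come
 * from <linux/kexec.h>, the addresses are made up):
 *
 *      entry[0] = 0x01000000 | IND_DESTINATION   (start copying at 16M)
 *      entry[1] = 0x3fa42000 | IND_SOURCE        (page for 16M + 0K)
 *      entry[2] = 0x3fa87000 | IND_SOURCE        (page for 16M + 4K)
 *      entry[3] = 0x3fb10000 | IND_INDIRECTION   (list continues there)
 *      ...
 *      entry[N] = IND_DONE                       (end of the list)
 *
 * kimage_add_entry() below builds this list one entry at a time.
 */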

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                       gfp_t gfp_mask,
                       unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                       unsigned long nr_segments,
                       struct kexec_segment __user *segments)
{
    size_t segment_bytes;
    struct kimage *image;
    unsigned long i;
    int result;

    /* Allocate a controlling structure */
    result = -ENOMEM;
    image = kzalloc(sizeof(*image), GFP_KERNEL);
    if (!image)
        goto out;

    image->head = 0;
    image->entry = &image->head;
    image->last_entry = &image->head;
    image->control_page = ~0; /* By default this does not apply */
    image->start = entry;
    image->type = KEXEC_TYPE_DEFAULT;

    /* Initialize the list of control pages */
    INIT_LIST_HEAD(&image->control_pages);

    /* Initialize the list of destination pages */
    INIT_LIST_HEAD(&image->dest_pages);

    /* Initialize the list of unusable pages */
    INIT_LIST_HEAD(&image->unuseable_pages);

    /* Read in the segments */
    image->nr_segments = nr_segments;
    segment_bytes = nr_segments * sizeof(*segments);
    result = copy_from_user(image->segment, segments, segment_bytes);
    if (result) {
        result = -EFAULT;
        goto out;
    }

    /*
     * Verify we have good destination addresses. The caller is
     * responsible for making certain we don't attempt to load
     * the new image into invalid or reserved areas of RAM. This
     * just verifies it is an address we can use.
     *
     * Since the kernel does everything in page size chunks ensure
     * the destination addresses are page aligned. Too many
     * special cases crop up when we don't do this. The most
     * insidious is getting overlapping destination addresses
     * simply because addresses are changed to page size
     * granularity.
     */
    result = -EADDRNOTAVAIL;
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
            goto out;
        if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
            goto out;
    }

    /* Verify our destination addresses do not overlap.
     * If we allowed overlapping destination addresses
     * through, very weird things can happen with no
     * easy explanation as one segment stomps on another.
     */
    result = -EINVAL;
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;
        unsigned long j;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        for (j = 0; j < i; j++) {
            unsigned long pstart, pend;
            pstart = image->segment[j].mem;
            pend = pstart + image->segment[j].memsz;
            /* Do the segments overlap ? */
            if ((mend > pstart) && (mstart < pend))
                goto out;
        }
    }

    /* Ensure our buffer sizes do not exceed
     * our memory sizes. This should always be the case,
     * and it is easier to check up front than to be surprised
     * later on.
     */
    result = -EINVAL;
    for (i = 0; i < nr_segments; i++) {
        if (image->segment[i].bufsz > image->segment[i].memsz)
            goto out;
    }

    result = 0;
out:
    if (result == 0)
        *rimage = image;
    else
        kfree(image);

    return result;
}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                unsigned long nr_segments,
                struct kexec_segment __user *segments)
{
    int result;
    struct kimage *image;

    /* Allocate and initialize a controlling structure */
    image = NULL;
    result = do_kimage_alloc(&image, entry, nr_segments, segments);
    if (result)
        goto out;

    /*
     * Find a location for the control code buffer, and add it to
     * the vector of segments so that its pages will also be
     * counted as destination pages.
     */
    result = -ENOMEM;
    image->control_code_page = kimage_alloc_control_pages(image,
                       get_order(KEXEC_CONTROL_PAGE_SIZE));
    if (!image->control_code_page) {
        printk(KERN_ERR "Could not allocate control_code_buffer\n");
        goto out;
    }

    image->swap_page = kimage_alloc_control_pages(image, 0);
    if (!image->swap_page) {
        printk(KERN_ERR "Could not allocate swap buffer\n");
        goto out;
    }

    result = 0;
 out:
    if (result == 0)
        *rimage = image;
    else
        kfree(image);

    return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                unsigned long nr_segments,
                struct kexec_segment __user *segments)
{
    int result;
    struct kimage *image;
    unsigned long i;

    image = NULL;
    /* Verify we have a valid entry point */
    if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
        result = -EADDRNOTAVAIL;
        goto out;
    }

    /* Allocate and initialize a controlling structure */
    result = do_kimage_alloc(&image, entry, nr_segments, segments);
    if (result)
        goto out;

    /* Enable the special crash kernel control page
     * allocation policy.
     */
    image->control_page = crashk_res.start;
    image->type = KEXEC_TYPE_CRASH;

    /*
     * Verify we have good destination addresses. Normally
     * the caller is responsible for making certain we don't
     * attempt to load the new image into invalid or reserved
     * areas of RAM. But crash kernels are preloaded into a
     * reserved area of RAM. We must ensure the addresses
     * are in the reserved area, otherwise preloading the
     * kernel could corrupt things.
     */
    result = -EADDRNOTAVAIL;
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz - 1;
        /* Ensure we are within the crash kernel limits */
        if ((mstart < crashk_res.start) || (mend > crashk_res.end))
            goto out;
    }

    /*
     * Find a location for the control code buffer, and add it to
     * the vector of segments so that its pages will also be
     * counted as destination pages.
     */
    result = -ENOMEM;
    image->control_code_page = kimage_alloc_control_pages(image,
                       get_order(KEXEC_CONTROL_PAGE_SIZE));
    if (!image->control_code_page) {
        printk(KERN_ERR "Could not allocate control_code_buffer\n");
        goto out;
    }

    result = 0;
out:
    if (result == 0)
        *rimage = image;
    else
        kfree(image);

    return result;
}

static int kimage_is_destination_range(struct kimage *image,
                    unsigned long start,
                    unsigned long end)
{
    unsigned long i;

    for (i = 0; i < image->nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        if ((end > mstart) && (start < mend))
            return 1;
    }

    return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
    struct page *pages;

    pages = alloc_pages(gfp_mask, order);
    if (pages) {
        unsigned int count, i;
        pages->mapping = NULL;
        set_page_private(pages, order);
        count = 1 << order;
        for (i = 0; i < count; i++)
            SetPageReserved(pages + i);
    }

    return pages;
}

static void kimage_free_pages(struct page *page)
{
    unsigned int order, count, i;

    order = page_private(page);
    count = 1 << order;
    for (i = 0; i < count; i++)
        ClearPageReserved(page + i);
    __free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
    struct list_head *pos, *next;

    list_for_each_safe(pos, next, list) {
        struct page *page;

        page = list_entry(pos, struct page, lru);
        list_del(&page->lru);
        kimage_free_pages(page);
    }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                            unsigned int order)
{
    /* Control pages are special, they are the intermediaries
     * that are needed while we copy the rest of the pages
     * to their final resting place. As such they must
     * not conflict with either the destination addresses
     * or memory the kernel is already using.
     *
     * The only case where we really need more than one of
     * these is for architectures where we cannot disable
     * the MMU and must instead generate an identity mapped
     * page table for all of the memory.
     *
     * At worst this runs in O(N) of the image size.
     */
    struct list_head extra_pages;
    struct page *pages;
    unsigned int count;

    count = 1 << order;
    INIT_LIST_HEAD(&extra_pages);

    /* Loop while I can allocate a page and the page allocated
     * is a destination page.
     */
    do {
        unsigned long pfn, epfn, addr, eaddr;

        pages = kimage_alloc_pages(GFP_KERNEL, order);
        if (!pages)
            break;
        pfn = page_to_pfn(pages);
        epfn = pfn + count;
        addr = pfn << PAGE_SHIFT;
        eaddr = epfn << PAGE_SHIFT;
        if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                  kimage_is_destination_range(image, addr, eaddr)) {
            list_add(&pages->lru, &extra_pages);
            pages = NULL;
        }
    } while (!pages);

    if (pages) {
        /* Remember the allocated page... */
        list_add(&pages->lru, &image->control_pages);

        /* Because the page is already in its destination
         * location we will never allocate another page at
         * that address. Therefore kimage_alloc_pages
         * will not return it (again) and we don't need
         * to give it an entry in image->segment[].
         */
    }
    /* Deal with the destination pages I have inadvertently allocated.
     *
     * Ideally I would convert multi-page allocations into single
     * page allocations, and add everything to image->dest_pages.
     *
     * For now it is simpler to just free the pages.
     */
    kimage_free_page_list(&extra_pages);

    return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                              unsigned int order)
{
    /* Control pages are special, they are the intermediaries
     * that are needed while we copy the rest of the pages
     * to their final resting place. As such they must
     * not conflict with either the destination addresses
     * or memory the kernel is already using.
     *
     * Control pages are also the only pages we must allocate
     * when loading a crash kernel. All of the other pages
     * are specified by the segments and we just memcpy
     * into them directly.
     *
     * The only case where we really need more than one of
     * these is for architectures where we cannot disable
     * the MMU and must instead generate an identity mapped
     * page table for all of the memory.
     *
     * Given the low demand this implements a very simple
     * allocator that finds the first hole of the appropriate
     * size in the reserved memory region, and allocates all
     * of the memory up to and including the hole.
     */
    unsigned long hole_start, hole_end, size;
    struct page *pages;

    pages = NULL;
    size = (1 << order) << PAGE_SHIFT;
    hole_start = (image->control_page + (size - 1)) & ~(size - 1);
    hole_end = hole_start + size - 1;
    while (hole_end <= crashk_res.end) {
        unsigned long i;

        if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
            break;
        if (hole_end > crashk_res.end)
            break;
        /* See if I overlap any of the segments */
        for (i = 0; i < image->nr_segments; i++) {
            unsigned long mstart, mend;

            mstart = image->segment[i].mem;
            mend = mstart + image->segment[i].memsz - 1;
            if ((hole_end >= mstart) && (hole_start <= mend)) {
                /* Advance the hole to the end of the segment */
                hole_start = (mend + (size - 1)) & ~(size - 1);
                hole_end = hole_start + size - 1;
                break;
            }
        }
        /* If I don't overlap any segments I have found my hole! */
        if (i == image->nr_segments) {
            pages = pfn_to_page(hole_start >> PAGE_SHIFT);
            break;
        }
    }
    if (pages)
        image->control_page = hole_end;

    return pages;
}

struct page *kimage_alloc_control_pages(struct kimage *image,
                     unsigned int order)
{
    struct page *pages = NULL;

    switch (image->type) {
    case KEXEC_TYPE_DEFAULT:
        pages = kimage_alloc_normal_control_pages(image, order);
        break;
    case KEXEC_TYPE_CRASH:
        pages = kimage_alloc_crash_control_pages(image, order);
        break;
    }

    return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
    if (*image->entry != 0)
        image->entry++;

    if (image->entry == image->last_entry) {
        kimage_entry_t *ind_page;
        struct page *page;

        page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
        if (!page)
            return -ENOMEM;

        ind_page = page_address(page);
        *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
        image->entry = ind_page;
        image->last_entry = ind_page +
                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
    }
    *image->entry = entry;
    image->entry++;
    *image->entry = 0;

    return 0;
}

static int kimage_set_destination(struct kimage *image,
                   unsigned long destination)
{
    int result;

    destination &= PAGE_MASK;
    result = kimage_add_entry(image, destination | IND_DESTINATION);
    if (result == 0)
        image->destination = destination;

    return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
    int result;

    page &= PAGE_MASK;
    result = kimage_add_entry(image, page | IND_SOURCE);
    if (result == 0)
        image->destination += PAGE_SIZE;

    return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
    /* Walk through and free any extra destination pages I may have */
    kimage_free_page_list(&image->dest_pages);

    /* Walk through and free any unusable pages I have cached */
    kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
    if (*image->entry != 0)
        image->entry++;

    *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
        ptr = (entry & IND_INDIRECTION) ? \
            phys_to_virt(entry & PAGE_MASK) : ptr + 1)
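
/*
 * Example use of the iterator above (an illustrative sketch, not a
 * helper defined in this file): count the source pages of an image.
 *
 *      kimage_entry_t *ptr, entry;
 *      unsigned long nr_source_pages = 0;
 *
 *      for_each_kimage_entry(image, ptr, entry)
 *              if (entry & IND_SOURCE)
 *                      nr_source_pages++;
 *
 * kimage_free() and kimage_dst_used() below walk the list this way.
 */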

static void kimage_free_entry(kimage_entry_t entry)
{
    struct page *page;

    page = pfn_to_page(entry >> PAGE_SHIFT);
    kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
    kimage_entry_t *ptr, entry;
    kimage_entry_t ind = 0;

    if (!image)
        return;

    kimage_free_extra_pages(image);
    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_INDIRECTION) {
            /* Free the previous indirection page */
            if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);
            /* Save this indirection page until we are
             * done with it.
             */
            ind = entry;
        } else if (entry & IND_SOURCE)
            kimage_free_entry(entry);
    }
    /* Free the final indirection page */
    if (ind & IND_INDIRECTION)
        kimage_free_entry(ind);

    /* Handle any machine specific cleanup */
    machine_kexec_cleanup(image);

    /* Free the kexec control pages... */
    kimage_free_page_list(&image->control_pages);
    kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                    unsigned long page)
{
    kimage_entry_t *ptr, entry;
    unsigned long destination = 0;

    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_DESTINATION)
            destination = entry & PAGE_MASK;
        else if (entry & IND_SOURCE) {
            if (page == destination)
                return ptr;
            destination += PAGE_SIZE;
        }
    }

    return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                    gfp_t gfp_mask,
                    unsigned long destination)
{
    /*
     * Here we implement safeguards to ensure that a source page
     * is not copied to its destination page before the data on
     * the destination page is no longer useful.
     *
     * To do this we maintain the invariant that a source page is
     * either its own destination page, or it is not a
     * destination page at all.
     *
     * That is slightly stronger than required, but the proof
     * that no problems will occur is trivial, and the
     * implementation is simple to verify.
     *
     * When allocating all pages normally this algorithm will run
     * in O(N) time, but in the worst case it will run in O(N^2)
     * time. If the runtime is a problem the data structures can
     * be fixed.
     */
    struct page *page;
    unsigned long addr;

    /*
     * Walk through the list of destination pages, and see if I
     * have a match.
     */
    list_for_each_entry(page, &image->dest_pages, lru) {
        addr = page_to_pfn(page) << PAGE_SHIFT;
        if (addr == destination) {
            list_del(&page->lru);
            return page;
        }
    }
    page = NULL;
    while (1) {
        kimage_entry_t *old;

        /* Allocate a page, if we run out of memory give up */
        page = kimage_alloc_pages(gfp_mask, 0);
        if (!page)
            return NULL;
        /* If the page cannot be used file it away */
        if (page_to_pfn(page) >
                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
            list_add(&page->lru, &image->unuseable_pages);
            continue;
        }
        addr = page_to_pfn(page) << PAGE_SHIFT;

        /* If it is the destination page we want, use it */
        if (addr == destination)
            break;

        /* If the page is not a destination page use it */
        if (!kimage_is_destination_range(image, addr,
                          addr + PAGE_SIZE))
            break;

        /*
         * I know that the page is someone's destination page.
         * See if there is already a source page for this
         * destination page. And if so swap the source pages.
         */
        old = kimage_dst_used(image, addr);
        if (old) {
            /* If so move it */
            unsigned long old_addr;
            struct page *old_page;

            old_addr = *old & PAGE_MASK;
            old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
            copy_highpage(page, old_page);
            *old = addr | (*old & ~PAGE_MASK);

            /* The old page I have found cannot be a
             * destination page, so return it if its
             * gfp_flags honor the ones passed in.
             */
            if (!(gfp_mask & __GFP_HIGHMEM) &&
                PageHighMem(old_page)) {
                kimage_free_pages(old_page);
                continue;
            }
            addr = old_addr;
            page = old_page;
            break;
        } else {
            /* Place the page on the destination list; I
             * will use it later.
             */
            list_add(&page->lru, &image->dest_pages);
        }
    }

    return page;
}

static int kimage_load_normal_segment(struct kimage *image,
                     struct kexec_segment *segment)
{
    unsigned long maddr;
    unsigned long ubytes, mbytes;
    int result;
    unsigned char __user *buf;

    result = 0;
    buf = segment->buf;
    ubytes = segment->bufsz;
    mbytes = segment->memsz;
    maddr = segment->mem;

    result = kimage_set_destination(image, maddr);
    if (result < 0)
        goto out;

    while (mbytes) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
        if (!page) {
            result = -ENOMEM;
            goto out;
        }
        result = kimage_add_page(image, page_to_pfn(page)
                                << PAGE_SHIFT);
        if (result < 0)
            goto out;

        ptr = kmap(page);
        /* Start with a clear page */
        clear_page(ptr);
        ptr += maddr & ~PAGE_MASK;
        mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
        if (mchunk > mbytes)
            mchunk = mbytes;

        uchunk = mchunk;
        if (uchunk > ubytes)
            uchunk = ubytes;

        result = copy_from_user(ptr, buf, uchunk);
        kunmap(page);
        if (result) {
            result = -EFAULT;
            goto out;
        }
        ubytes -= uchunk;
        maddr += mchunk;
        buf += mchunk;
        mbytes -= mchunk;
    }
out:
    return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                    struct kexec_segment *segment)
{
    /* For crash dump kernels we simply copy the data from
     * user space to its destination.
     * We do things a page at a time for the sake of kmap.
     */
    unsigned long maddr;
    unsigned long ubytes, mbytes;
    int result;
    unsigned char __user *buf;

    result = 0;
    buf = segment->buf;
    ubytes = segment->bufsz;
    mbytes = segment->memsz;
    maddr = segment->mem;
    while (mbytes) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = pfn_to_page(maddr >> PAGE_SHIFT);
        if (!page) {
            result = -ENOMEM;
            goto out;
        }
        ptr = kmap(page);
        ptr += maddr & ~PAGE_MASK;
        mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
        if (mchunk > mbytes)
            mchunk = mbytes;

        uchunk = mchunk;
        if (uchunk > ubytes) {
            uchunk = ubytes;
            /* Zero the trailing part of the page */
            memset(ptr + uchunk, 0, mchunk - uchunk);
        }
        result = copy_from_user(ptr, buf, uchunk);
        kexec_flush_icache_page(page);
        kunmap(page);
        if (result) {
            result = -EFAULT;
            goto out;
        }
        ubytes -= uchunk;
        maddr += mchunk;
        buf += mchunk;
        mbytes -= mchunk;
    }
out:
    return result;
}

static int kimage_load_segment(struct kimage *image,
                struct kexec_segment *segment)
{
    int result = -ENOMEM;

    switch (image->type) {
    case KEXEC_TYPE_DEFAULT:
        result = kimage_load_normal_segment(image, segment);
        break;
    case KEXEC_TYPE_CRASH:
        result = kimage_load_crash_segment(image, segment);
        break;
    }

    return result;
}

/*
 * Exec kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number and
 *   then copies the image to its final destination and jumps
 *   into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need that
 * to happen you need to do it yourself.
 */
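
/*
 * Illustrative user-space sketch of loading a single-segment image
 * with the raw syscall (not part of this file; buffer, sizes,
 * destination and entry are made-up placeholders -- real loaders
 * such as kexec-tools compute them from the image being loaded):
 *
 *      struct kexec_segment seg = {
 *              .buf   = kernel_buf,        // image bytes in user memory
 *              .bufsz = kernel_len,
 *              .mem   = (void *)0x100000,  // page-aligned destination
 *              .memsz = (kernel_len + 4095) & ~4095UL,
 *      };
 *
 *      syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */
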
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
        struct kexec_segment __user *, segments, unsigned long, flags)
{
    struct kimage **dest_image, *image;
    int result;

    /* We only trust the superuser with rebooting the system. */
    if (!capable(CAP_SYS_BOOT))
        return -EPERM;

    /*
     * Verify we have a legal set of flags.
     * This leaves us room for future extensions.
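     *
     * KEXEC_FLAGS in <linux/kexec.h> is the mask of generic flags
     * (at this point KEXEC_ON_CRASH and KEXEC_PRESERVE_CONTEXT);
     * any other bit outside KEXEC_ARCH_MASK is rejected below.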
     */
    if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
        return -EINVAL;

    /* Verify we are on the appropriate architecture */
    if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
        ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
        return -EINVAL;

    /* Put an artificial cap on the number
     * of segments passed to kexec_load.
     */
    if (nr_segments > KEXEC_SEGMENT_MAX)
        return -EINVAL;

    image = NULL;
    result = 0;

    /* Because we write directly to the reserved memory
     * region when loading crash kernels we need a mutex here to
     * prevent multiple crash kernels from attempting to load
     * simultaneously, and to prevent a crash kernel from loading
     * over the top of an in-use crash kernel.
     *
     * KISS: always take the mutex.
     */
    if (!mutex_trylock(&kexec_mutex))
        return -EBUSY;

    dest_image = &kexec_image;
    if (flags & KEXEC_ON_CRASH)
        dest_image = &kexec_crash_image;
    if (nr_segments > 0) {
        unsigned long i;

        /* Loading another kernel to reboot into */
        if ((flags & KEXEC_ON_CRASH) == 0)
            result = kimage_normal_alloc(&image, entry,
                            nr_segments, segments);
        /* Loading another kernel to switch to if this one crashes */
        else if (flags & KEXEC_ON_CRASH) {
            /* Free any current crash dump kernel before
             * we corrupt it.
             */
            kimage_free(xchg(&kexec_crash_image, NULL));
            result = kimage_crash_alloc(&image, entry,
                             nr_segments, segments);
            crash_map_reserved_pages();
        }
        if (result)
            goto out;

        if (flags & KEXEC_PRESERVE_CONTEXT)
            image->preserve_context = 1;
        result = machine_kexec_prepare(image);
        if (result)
            goto out;

        for (i = 0; i < nr_segments; i++) {
            result = kimage_load_segment(image, &image->segment[i]);
            if (result)
                goto out;
        }
        kimage_terminate(image);
        if (flags & KEXEC_ON_CRASH)
            crash_unmap_reserved_pages();
    }
    /* Install the new kernel and uninstall the old */
    image = xchg(dest_image, image);

out:
    mutex_unlock(&kexec_mutex);
    kimage_free(image);

    return result;
}

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                unsigned long nr_segments,
                struct compat_kexec_segment __user *segments,
                unsigned long flags)
{
    struct compat_kexec_segment in;
    struct kexec_segment out, __user *ksegments;
    unsigned long i, result;

    /* Don't allow clients that don't understand the native
     * architecture to do anything.
     */
    if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
        return -EINVAL;

    if (nr_segments > KEXEC_SEGMENT_MAX)
        return -EINVAL;

    ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
    for (i = 0; i < nr_segments; i++) {
        result = copy_from_user(&in, &segments[i], sizeof(in));
        if (result)
            return -EFAULT;

        out.buf = compat_ptr(in.buf);
        out.bufsz = in.bufsz;
        out.mem = in.mem;
        out.memsz = in.memsz;

        result = copy_to_user(&ksegments[i], &out, sizeof(out));
        if (result)
            return -EFAULT;
    }

    return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
    /* Take the kexec_mutex here to prevent sys_kexec_load
     * running on one cpu from replacing the crash kernel
     * we are using after a panic on a different cpu.
     *
     * If the crash kernel was not located in a fixed area
     * of memory the xchg(&kexec_crash_image) would be
     * sufficient. But since I reuse the memory...
     */
    if (mutex_trylock(&kexec_mutex)) {
        if (kexec_crash_image) {
            struct pt_regs fixed_regs;

            crash_setup_regs(&fixed_regs, regs);
            crash_save_vmcoreinfo();
            machine_crash_shutdown(&fixed_regs);
            machine_kexec(kexec_crash_image);
        }
        mutex_unlock(&kexec_mutex);
    }
}

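/*
 * Note: the size reported here is exposed to user space as
 * /sys/kernel/kexec_crash_size (see kernel/ksysfs.c); writes to that
 * file are routed to crash_shrink_memory() below.
 */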
size_t crash_get_memory_size(void)
{
    size_t size = 0;

    mutex_lock(&kexec_mutex);
    if (crashk_res.end != crashk_res.start)
        size = resource_size(&crashk_res);
    mutex_unlock(&kexec_mutex);
    return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
                       unsigned long end)
{
    unsigned long addr;

    for (addr = begin; addr < end; addr += PAGE_SIZE) {
        ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
        init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
        free_page((unsigned long)__va(addr));
        totalram_pages++;
    }
}

int crash_shrink_memory(unsigned long new_size)
{
    int ret = 0;
    unsigned long start, end;
    unsigned long old_size;
    struct resource *ram_res;

    mutex_lock(&kexec_mutex);

    if (kexec_crash_image) {
        ret = -ENOENT;
        goto unlock;
    }
    start = crashk_res.start;
    end = crashk_res.end;
    old_size = (end == 0) ? 0 : end - start + 1;
    if (new_size >= old_size) {
        ret = (new_size == old_size) ? 0 : -EINVAL;
        goto unlock;
    }

    ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
    if (!ram_res) {
        ret = -ENOMEM;
        goto unlock;
    }

    start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
    end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

    crash_map_reserved_pages();
    crash_free_reserved_phys_range(end, crashk_res.end);

    if ((start == end) && (crashk_res.parent != NULL))
        release_resource(&crashk_res);

    ram_res->start = end;
    ram_res->end = crashk_res.end;
    ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
    ram_res->name = "System RAM";

    crashk_res.end = end - 1;

    insert_resource(&iomem_resource, ram_res);
    crash_unmap_reserved_pages();

unlock:
    mutex_unlock(&kexec_mutex);
    return ret;
}
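
/*
 * Illustrative shell use via the sysfs knob mentioned above
 * (values are examples):
 *
 *      # cat /sys/kernel/kexec_crash_size
 *      134217728
 *      # echo 67108864 > /sys/kernel/kexec_crash_size
 *
 * The freed tail of the reservation is handed back as "System RAM".
 */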

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                size_t data_len)
{
    struct elf_note note;

    note.n_namesz = strlen(name) + 1;
    note.n_descsz = data_len;
    note.n_type = type;
    memcpy(buf, &note, sizeof(note));
    buf += (sizeof(note) + 3)/4;
    memcpy(buf, name, note.n_namesz);
    buf += (note.n_namesz + 3)/4;
    memcpy(buf, data, note.n_descsz);
    buf += (note.n_descsz + 3)/4;

    return buf;
}
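
/*
 * The resulting buffer is a standard ELF note, every field padded to
 * a 4-byte boundary:
 *
 *      n_namesz | n_descsz | n_type | name (padded) | desc (padded)
 *
 * final_note() below writes the zeroed header that terminates the
 * note buffer.
 */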

static void final_note(u32 *buf)
{
    struct elf_note note;

    note.n_namesz = 0;
    note.n_descsz = 0;
    note.n_type = 0;
    memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
    struct elf_prstatus prstatus;
    u32 *buf;

    if ((cpu < 0) || (cpu >= nr_cpu_ids))
        return;

    /* Using ELF notes here is opportunistic.
     * I need a well defined structure format
     * for the data I pass, and I need tags
     * on the data to indicate what information I have
     * squirrelled away. ELF notes happen to provide
     * all of that, so there is no need to invent something new.
     */
    buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
    if (!buf)
        return;
    memset(&prstatus, 0, sizeof(prstatus));
    prstatus.pr_pid = current->pid;
    elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
    buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                        &prstatus, sizeof(prstatus));
    final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
    /* Allocate memory for saving cpu registers. */
    crash_notes = alloc_percpu(note_buf_t);
    if (!crash_notes) {
        printk(KERN_ERR "Kexec: Memory allocation for saving cpu "
               "register states failed\n");
        return -ENOMEM;
    }
    return 0;
}
module_init(crash_notes_memory_init)

/*
 * Parsing of the "crashkernel" command line.
 *
 * This code is intended to be called from architecture specific code.
 */

/*
 * This function parses command lines in the format
 *
 *     crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
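 *
 * Example (see Documentation/kdump/kdump.txt):
 *
 *     crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when total system RAM lies between 512M and 2G, and
 * 128M when it is 2G or more.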
 */
static int __init parse_crashkernel_mem(char *cmdline,
                    unsigned long long system_ram,
                    unsigned long long *crash_size,
                    unsigned long long *crash_base)
{
    char *cur = cmdline, *tmp;

    /* for each entry of the comma-separated list */
    do {
        unsigned long long start, end = ULLONG_MAX, size;

        /* get the start of the range */
        start = memparse(cur, &tmp);
        if (cur == tmp) {
            pr_warning("crashkernel: Memory value expected\n");
            return -EINVAL;
        }
        cur = tmp;
        if (*cur != '-') {
            pr_warning("crashkernel: '-' expected\n");
            return -EINVAL;
        }
        cur++;

        /* if no ':' is here, then we read the end */
        if (*cur != ':') {
            end = memparse(cur, &tmp);
            if (cur == tmp) {
                pr_warning("crashkernel: Memory value expected\n");
                return -EINVAL;
            }
            cur = tmp;
            if (end <= start) {
                pr_warning("crashkernel: end <= start\n");
                return -EINVAL;
            }
        }

        if (*cur != ':') {
            pr_warning("crashkernel: ':' expected\n");
            return -EINVAL;
        }
        cur++;

        size = memparse(cur, &tmp);
        if (cur == tmp) {
            pr_warning("Memory value expected\n");
            return -EINVAL;
        }
        cur = tmp;
        if (size >= system_ram) {
            pr_warning("crashkernel: invalid size\n");
            return -EINVAL;
        }

        /* match? */
        if (system_ram >= start && system_ram < end) {
            *crash_size = size;
            break;
        }
    } while (*cur++ == ',');

    if (*crash_size > 0) {
        while (*cur && *cur != ' ' && *cur != '@')
            cur++;
        if (*cur == '@') {
            cur++;
            *crash_base = memparse(cur, &tmp);
            if (cur == tmp) {
                pr_warning("Memory value expected after '@'\n");
                return -EINVAL;
            }
        }
    }

    return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *     crashkernel=size[@offset]
 *
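 * e.g. crashkernel=64M@16M, which reserves 64M of RAM starting at
 * physical address 16M.
 *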
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
                       unsigned long long *crash_size,
                       unsigned long long *crash_base)
{
    char *cur = cmdline;

    *crash_size = memparse(cmdline, &cur);
    if (cmdline == cur) {
        pr_warning("crashkernel: memory value expected\n");
        return -EINVAL;
    }

    if (*cur == '@')
        *crash_base = memparse(cur + 1, &cur);
    else if (*cur != ' ' && *cur != '\0') {
        pr_warning("crashkernel: unrecognized char\n");
        return -EINVAL;
    }

    return 0;
}

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
                 unsigned long long system_ram,
                 unsigned long long *crash_size,
                 unsigned long long *crash_base)
{
    char *p = cmdline, *ck_cmdline = NULL;
    char *first_colon, *first_space;

    BUG_ON(!crash_size || !crash_base);
    *crash_size = 0;
    *crash_base = 0;

    /* find crashkernel and use the last one if there are more */
    p = strstr(p, "crashkernel=");
    while (p) {
        ck_cmdline = p;
        p = strstr(p + 1, "crashkernel=");
    }

    if (!ck_cmdline)
        return -EINVAL;

    ck_cmdline += 12; /* strlen("crashkernel=") */

    /*
     * if the commandline contains a ':', then that's the extended
     * syntax -- if not, it must be the classic syntax
     */
    first_colon = strchr(ck_cmdline, ':');
    first_space = strchr(ck_cmdline, ' ');
    if (first_colon && (!first_space || first_colon < first_space))
        return parse_crashkernel_mem(ck_cmdline, system_ram,
                crash_size, crash_base);
    else
        return parse_crashkernel_simple(ck_cmdline, crash_size,
                crash_base);
}

static void update_vmcoreinfo_note(void)
{
    u32 *buf = vmcoreinfo_note;

    if (!vmcoreinfo_size)
        return;
    buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                  vmcoreinfo_size);
    final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
    vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
    update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
    va_list args;
    char buf[0x50];
    int r;

    va_start(args, fmt);
    r = vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);

    if (r + vmcoreinfo_size > vmcoreinfo_max_size)
        r = vmcoreinfo_max_size - vmcoreinfo_size;

    memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

    vmcoreinfo_size += r;
}
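
/*
 * The accumulated vmcoreinfo data is a series of KEY=value lines
 * read by dump tools such as makedumpfile. An illustrative excerpt
 * (values made up):
 *
 *      OSRELEASE=3.4.0
 *      PAGESIZE=4096
 *      SYMBOL(init_uts_ns)=ffffffff81a0a7c0
 *      OFFSET(page.flags)=0
 */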

/*
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
    return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
    VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
    VMCOREINFO_PAGESIZE(PAGE_SIZE);

    VMCOREINFO_SYMBOL(init_uts_ns);
    VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
    VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
    VMCOREINFO_SYMBOL(_stext);
    VMCOREINFO_SYMBOL(vmlist);

#ifndef CONFIG_NEED_MULTIPLE_NODES
    VMCOREINFO_SYMBOL(mem_map);
    VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
    VMCOREINFO_SYMBOL(mem_section);
    VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
    VMCOREINFO_STRUCT_SIZE(mem_section);
    VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
    VMCOREINFO_STRUCT_SIZE(page);
    VMCOREINFO_STRUCT_SIZE(pglist_data);
    VMCOREINFO_STRUCT_SIZE(zone);
    VMCOREINFO_STRUCT_SIZE(free_area);
    VMCOREINFO_STRUCT_SIZE(list_head);
    VMCOREINFO_SIZE(nodemask_t);
    VMCOREINFO_OFFSET(page, flags);
    VMCOREINFO_OFFSET(page, _count);
    VMCOREINFO_OFFSET(page, mapping);
    VMCOREINFO_OFFSET(page, lru);
    VMCOREINFO_OFFSET(pglist_data, node_zones);
    VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
    VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
    VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
    VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
    VMCOREINFO_OFFSET(pglist_data, node_id);
    VMCOREINFO_OFFSET(zone, free_area);
    VMCOREINFO_OFFSET(zone, vm_stat);
    VMCOREINFO_OFFSET(zone, spanned_pages);
    VMCOREINFO_OFFSET(free_area, free_list);
    VMCOREINFO_OFFSET(list_head, next);
    VMCOREINFO_OFFSET(list_head, prev);
    VMCOREINFO_OFFSET(vm_struct, addr);
    VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
    log_buf_kexec_setup();
    VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
    VMCOREINFO_NUMBER(NR_FREE_PAGES);
    VMCOREINFO_NUMBER(PG_lru);
    VMCOREINFO_NUMBER(PG_private);
    VMCOREINFO_NUMBER(PG_swapcache);

    arch_crash_save_vmcoreinfo();
    update_vmcoreinfo_note();

    return 0;
}

module_init(crash_save_vmcoreinfo_init)

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
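 *
 * Reached from user space through reboot(2) with
 * LINUX_REBOOT_CMD_KEXEC, e.g. (illustrative):
 *
 *      syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *              LINUX_REBOOT_CMD_KEXEC, NULL);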
 */
int kernel_kexec(void)
{
    int error = 0;

    if (!mutex_trylock(&kexec_mutex))
        return -EBUSY;
    if (!kexec_image) {
        error = -EINVAL;
        goto Unlock;
    }

#ifdef CONFIG_KEXEC_JUMP
    if (kexec_image->preserve_context) {
        lock_system_sleep();
        pm_prepare_console();
        error = freeze_processes();
        if (error) {
            error = -EBUSY;
            goto Restore_console;
        }
        suspend_console();
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
            goto Resume_console;
        /* At this point, dpm_suspend_start() has been called,
         * but *not* dpm_suspend_end(). We *must* call
         * dpm_suspend_end() now. Otherwise, drivers for
         * some devices (e.g. interrupt controllers) become
         * desynchronized with the actual state of the
         * hardware at resume time, and evil weirdness ensues.
         */
        error = dpm_suspend_end(PMSG_FREEZE);
        if (error)
            goto Resume_devices;
        error = disable_nonboot_cpus();
        if (error)
            goto Enable_cpus;
        local_irq_disable();
        error = syscore_suspend();
        if (error)
            goto Enable_irqs;
    } else
#endif
    {
        kernel_restart_prepare(NULL);
        printk(KERN_EMERG "Starting new kernel\n");
        machine_shutdown();
    }

    machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
    if (kexec_image->preserve_context) {
        syscore_resume();
 Enable_irqs:
        local_irq_enable();
 Enable_cpus:
        enable_nonboot_cpus();
        dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
        dpm_resume_end(PMSG_RESTORE);
 Resume_console:
        resume_console();
        thaw_processes();
 Restore_console:
        pm_restore_console();
        unlock_system_sleep();
    }
#endif

 Unlock:
    mutex_unlock(&kexec_mutex);
    return error;
}

