| 1 | From eee16330c9de9adf7880cce9f1d32e13f89706bb Mon Sep 17 00:00:00 2001 |
| 2 | From: Wu Zhangjin <wuzhangjin@gmail.com> |
| 3 | Date: Tue, 11 Jan 2011 13:16:47 +0000 |
| 4 | Subject: MIPS: Add crash and kdump support |
| 5 | |
| 6 | From: http://patchwork.linux-mips.org/patch/1025/ |
| 7 | |
| 8 | Hello folks, |
| 9 | |
| 10 | Please find here MIPS crash and kdump patches. |
| 11 | This is a patch set of 3 patches: |
| 12 | 1. generic MIPS changes (kernel); |
| 13 | 2. MIPS Cavium Octeon board kexec/kdump code (kernel); |
| 14 | 3. Kexec user space MIPS changes. |
| 15 | |
| 16 | Patches were tested on the latest linux-mips@ git kernel and the latest |
| 17 | kexec-tools git on Cavium Octeon 50xx board. |
| 18 | |
| 19 | I also made the same code working on RMI XLR/XLS boards for both |
| 20 | mips32 and mips64 kernels. |
| 21 | |
| 22 | Best regards, |
| 23 | Maxim Uvarov. |
| 24 | |
| 25 | ------ |
| 26 | [ Zhangjin: Several trivial build failures have been fixed. |
| 27 | |
| 28 | Note: the 2nd patch cannot be applied cleanly, but may be a good |
| 29 | reference for other board development: |
| 30 | |
| 31 | + MIPS Cavium Octeon board kexec,kdump support |
| 32 | http://patchwork.linux-mips.org/patch/1026/ |
| 33 | |
| 34 | And the 3rd patch has already been merged into the mainline kexec-tools: |
| 35 | |
| 36 | + some kexec MIPS improvements |
| 37 | http://patchwork.linux-mips.org/patch/1027/ |
| 38 | |
| 39 | kexec-tools is available here: |
| 40 | |
| 41 | + http://horms.net/projects/kexec/ |
| 42 | git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git |
| 43 | ] |
| 44 | Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com> |
| 45 | --- |
| 46 | (limited to 'arch/mips/kernel') |
| 47 | |
| 48 | --- a/arch/mips/kernel/Makefile |
| 49 | +++ b/arch/mips/kernel/Makefile |
| 50 | @@ -93,7 +93,8 @@ obj-$(CONFIG_I8253) += i8253.o |
| 51 | |
| 52 | obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o |
| 53 | |
| 54 | -obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o |
| 55 | +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o |
| 56 | +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
| 57 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
| 58 | obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o |
| 59 | obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o |
| 60 | --- /dev/null |
| 61 | +++ b/arch/mips/kernel/crash.c |
| 62 | @@ -0,0 +1,75 @@ |
| 63 | +#include <linux/kernel.h> |
| 64 | +#include <linux/smp.h> |
| 65 | +#include <linux/reboot.h> |
| 66 | +#include <linux/kexec.h> |
| 67 | +#include <linux/bootmem.h> |
| 68 | +#include <linux/crash_dump.h> |
| 69 | +#include <linux/delay.h> |
| 70 | +#include <linux/init.h> |
| 71 | +#include <linux/irq.h> |
| 72 | +#include <linux/types.h> |
| 73 | +#include <linux/sched.h> |
| 74 | + |
| 75 | +#ifdef CONFIG_CRASH_DUMP |
| 76 | +unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; |
| 77 | +#endif |
| 78 | + |
| 79 | +/* This keeps a track of which one is crashing cpu. */ |
| 80 | +int crashing_cpu = -1; |
| 81 | +static cpumask_t cpus_in_crash = CPU_MASK_NONE; |
| 82 | + |
| 83 | +#ifdef CONFIG_SMP |
| 84 | +void crash_shutdown_secondary(void *ignore) |
| 85 | +{ |
| 86 | + struct pt_regs *regs; |
| 87 | + int cpu = smp_processor_id(); |
| 88 | + |
| 89 | + regs = task_pt_regs(current); |
| 90 | + |
| 91 | + if (!cpu_online(cpu)) |
| 92 | + return; |
| 93 | + |
| 94 | + local_irq_disable(); |
| 95 | + if (!cpu_isset(cpu, cpus_in_crash)) |
| 96 | + crash_save_cpu(regs, cpu); |
| 97 | + cpu_set(cpu, cpus_in_crash); |
| 98 | + |
| 99 | + while (!atomic_read(&kexec_ready_to_reboot)) |
| 100 | + cpu_relax(); |
| 101 | + relocated_kexec_smp_wait(NULL); |
| 102 | + /* NOTREACHED */ |
| 103 | +} |
| 104 | + |
| 105 | +static void crash_kexec_prepare_cpus(void) |
| 106 | +{ |
| 107 | + unsigned int msecs; |
| 108 | + |
| 109 | + unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ |
| 110 | + |
| 111 | + dump_send_ipi(crash_shutdown_secondary); |
| 112 | + smp_wmb(); |
| 113 | + |
| 114 | + /* |
| 115 | + * The crash CPU sends an IPI and wait for other CPUs to |
| 116 | + * respond. Delay of at least 10 seconds. |
| 117 | + */ |
| 118 | + printk(KERN_EMERG "Sending IPI to other cpus...\n"); |
| 119 | + msecs = 10000; |
| 120 | + while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { |
| 121 | + cpu_relax(); |
| 122 | + mdelay(1); |
| 123 | + } |
| 124 | +} |
| 125 | + |
| 126 | +#else |
| 127 | +static void crash_kexec_prepare_cpus(void) {} |
| 128 | +#endif |
| 129 | + |
| 130 | +void default_machine_crash_shutdown(struct pt_regs *regs) |
| 131 | +{ |
| 132 | + local_irq_disable(); |
| 133 | + crashing_cpu = smp_processor_id(); |
| 134 | + crash_save_cpu(regs, crashing_cpu); |
| 135 | + crash_kexec_prepare_cpus(); |
| 136 | + cpu_set(crashing_cpu, cpus_in_crash); |
| 137 | +} |
| 138 | --- /dev/null |
| 139 | +++ b/arch/mips/kernel/crash_dump.c |
| 140 | @@ -0,0 +1,86 @@ |
| 141 | +#include <linux/highmem.h> |
| 142 | +#include <linux/bootmem.h> |
| 143 | +#include <linux/crash_dump.h> |
| 144 | +#include <asm/uaccess.h> |
| 145 | + |
| 146 | +#ifdef CONFIG_PROC_VMCORE |
| 147 | +static int __init parse_elfcorehdr(char *p) |
| 148 | +{ |
| 149 | + if (p) |
| 150 | + elfcorehdr_addr = memparse(p, &p); |
| 151 | + return 1; |
| 152 | +} |
| 153 | +__setup("elfcorehdr=", parse_elfcorehdr); |
| 154 | +#endif |
| 155 | + |
| 156 | +static int __init parse_savemaxmem(char *p) |
| 157 | +{ |
| 158 | + if (p) |
| 159 | + saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; |
| 160 | + |
| 161 | + return 1; |
| 162 | +} |
| 163 | +__setup("savemaxmem=", parse_savemaxmem); |
| 164 | + |
| 165 | + |
| 166 | +static void *kdump_buf_page; |
| 167 | + |
| 168 | +/** |
| 169 | + * copy_oldmem_page - copy one page from "oldmem" |
| 170 | + * @pfn: page frame number to be copied |
| 171 | + * @buf: target memory address for the copy; this can be in kernel address |
| 172 | + * space or user address space (see @userbuf) |
| 173 | + * @csize: number of bytes to copy |
| 174 | + * @offset: offset in bytes into the page (based on pfn) to begin the copy |
| 175 | + * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
| 176 | + * otherwise @buf is in kernel address space, use memcpy(). |
| 177 | + * |
| 178 | + * Copy a page from "oldmem". For this page, there is no pte mapped |
| 179 | + * in the current kernel. |
| 180 | + * |
| 181 | + * Calling copy_to_user() in atomic context is not desirable. Hence first |
| 182 | + * copying the data to a pre-allocated kernel page and then copying to user |
| 183 | + * space in non-atomic context. |
| 184 | + */ |
| 185 | +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
| 186 | + size_t csize, unsigned long offset, int userbuf) |
| 187 | +{ |
| 188 | + void *vaddr; |
| 189 | + |
| 190 | + if (!csize) |
| 191 | + return 0; |
| 192 | + |
| 193 | + vaddr = kmap_atomic_pfn(pfn, KM_PTE0); |
| 194 | + |
| 195 | + if (!userbuf) { |
| 196 | + memcpy(buf, (vaddr + offset), csize); |
| 197 | + kunmap_atomic(vaddr, KM_PTE0); |
| 198 | + } else { |
| 199 | + if (!kdump_buf_page) { |
| 200 | + printk(KERN_WARNING "Kdump: Kdump buffer page not" |
| 201 | + " allocated\n"); |
| 202 | + return -EFAULT; |
| 203 | + } |
| 204 | + copy_page(kdump_buf_page, vaddr); |
| 205 | + kunmap_atomic(vaddr, KM_PTE0); |
| 206 | + if (copy_to_user(buf, (kdump_buf_page + offset), csize)) |
| 207 | + return -EFAULT; |
| 208 | + } |
| 209 | + |
| 210 | + return csize; |
| 211 | +} |
| 212 | + |
| 213 | +static int __init kdump_buf_page_init(void) |
| 214 | +{ |
| 215 | + int ret = 0; |
| 216 | + |
| 217 | + kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| 218 | + if (!kdump_buf_page) { |
| 219 | + printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer" |
| 220 | + " page\n"); |
| 221 | + ret = -ENOMEM; |
| 222 | + } |
| 223 | + |
| 224 | + return ret; |
| 225 | +} |
| 226 | +arch_initcall(kdump_buf_page_init); |
| 227 | --- a/arch/mips/kernel/machine_kexec.c |
| 228 | +++ b/arch/mips/kernel/machine_kexec.c |
| 229 | @@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_ |
| 230 | extern unsigned long kexec_start_address; |
| 231 | extern unsigned long kexec_indirection_page; |
| 232 | |
| 233 | +int (*_machine_kexec_prepare)(struct kimage *) = NULL; |
| 234 | +void (*_machine_kexec_shutdown)(void) = NULL; |
| 235 | +void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; |
| 236 | +#ifdef CONFIG_SMP |
| 237 | +void (*relocated_kexec_smp_wait) (void *); |
| 238 | +atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); |
| 239 | +#endif |
| 240 | + |
| 241 | int |
| 242 | machine_kexec_prepare(struct kimage *kimage) |
| 243 | { |
| 244 | + if (_machine_kexec_prepare) |
| 245 | + return _machine_kexec_prepare(kimage); |
| 246 | return 0; |
| 247 | } |
| 248 | |
| 249 | @@ -33,11 +43,17 @@ machine_kexec_cleanup(struct kimage *kim |
| 250 | void |
| 251 | machine_shutdown(void) |
| 252 | { |
| 253 | + if (_machine_kexec_shutdown) |
| 254 | + _machine_kexec_shutdown(); |
| 255 | } |
| 256 | |
| 257 | void |
| 258 | machine_crash_shutdown(struct pt_regs *regs) |
| 259 | { |
| 260 | + if (_machine_crash_shutdown) |
| 261 | + _machine_crash_shutdown(regs); |
| 262 | + else |
| 263 | + default_machine_crash_shutdown(regs); |
| 264 | } |
| 265 | |
| 266 | typedef void (*noretfun_t)(void) __attribute__((noreturn)); |
| 267 | @@ -52,7 +68,9 @@ machine_kexec(struct kimage *image) |
| 268 | reboot_code_buffer = |
| 269 | (unsigned long)page_address(image->control_code_page); |
| 270 | |
| 271 | - kexec_start_address = (unsigned long) phys_to_virt(image->start); |
| 272 | + kexec_start_address = |
| 273 | + (unsigned long) phys_to_virt(image->start); |
| 274 | + |
| 275 | kexec_indirection_page = |
| 276 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); |
| 277 | |
| 278 | @@ -63,7 +81,7 @@ machine_kexec(struct kimage *image) |
| 279 | * The generic kexec code builds a page list with physical |
| 280 | * addresses. they are directly accessible through KSEG0 (or |
| 281 | * CKSEG0 or XPHYS if on 64bit system), hence the |
| 282 | - * pys_to_virt() call. |
| 283 | + * phys_to_virt() call. |
| 284 | */ |
| 285 | for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); |
| 286 | ptr = (entry & IND_INDIRECTION) ? |
| 287 | @@ -81,5 +99,13 @@ machine_kexec(struct kimage *image) |
| 288 | printk("Will call new kernel at %08lx\n", image->start); |
| 289 | printk("Bye ...\n"); |
| 290 | __flush_cache_all(); |
| 291 | +#ifdef CONFIG_SMP |
| 292 | + /* All secondary cpus now may jump to kexec_wait cycle */ |
| 293 | + relocated_kexec_smp_wait = reboot_code_buffer + |
| 294 | + (void *)(kexec_smp_wait - relocate_new_kernel); |
| 295 | + smp_wmb(); |
| 296 | + atomic_set(&kexec_ready_to_reboot, 1); |
| 297 | +#endif |
| 298 | ((noretfun_t) reboot_code_buffer)(); |
| 299 | } |
| 300 | + |
| 301 | --- a/arch/mips/kernel/relocate_kernel.S |
| 302 | +++ b/arch/mips/kernel/relocate_kernel.S |
| 303 | @@ -15,6 +15,11 @@ |
| 304 | #include <asm/addrspace.h> |
| 305 | |
| 306 | LEAF(relocate_new_kernel) |
| 307 | + PTR_L a0, arg0 |
| 308 | + PTR_L a1, arg1 |
| 309 | + PTR_L a2, arg2 |
| 310 | + PTR_L a3, arg3 |
| 311 | + |
| 312 | PTR_L s0, kexec_indirection_page |
| 313 | PTR_L s1, kexec_start_address |
| 314 | |
| 315 | @@ -26,7 +31,6 @@ process_entry: |
| 316 | and s3, s2, 0x1 |
| 317 | beq s3, zero, 1f |
| 318 | and s4, s2, ~0x1 /* store destination addr in s4 */ |
| 319 | - move a0, s4 |
| 320 | b process_entry |
| 321 | |
| 322 | 1: |
| 323 | @@ -60,23 +64,100 @@ copy_word: |
| 324 | b process_entry |
| 325 | |
| 326 | done: |
| 327 | +#ifdef CONFIG_SMP |
| 328 | + /* kexec_flag reset is signal to other CPUs what kernel |
| 329 | + was moved to it's location. Note - we need relocated address |
| 330 | + of kexec_flag. */ |
| 331 | + |
| 332 | + bal 1f |
| 333 | + 1: move t1,ra; |
| 334 | + PTR_LA t2,1b |
| 335 | + PTR_LA t0,kexec_flag |
| 336 | + PTR_SUB t0,t0,t2; |
| 337 | + PTR_ADD t0,t1,t0; |
| 338 | + LONG_S zero,(t0) |
| 339 | +#endif |
| 340 | + |
| 341 | + sync |
| 342 | /* jump to kexec_start_address */ |
| 343 | j s1 |
| 344 | END(relocate_new_kernel) |
| 345 | |
| 346 | -kexec_start_address: |
| 347 | - EXPORT(kexec_start_address) |
| 348 | +#ifdef CONFIG_SMP |
| 349 | +/* |
| 350 | + * Other CPUs should wait until code is relocated and |
| 351 | + * then start at entry (?) point. |
| 352 | + */ |
| 353 | +LEAF(kexec_smp_wait) |
| 354 | + PTR_L a0, s_arg0 |
| 355 | + PTR_L a1, s_arg1 |
| 356 | + PTR_L a2, s_arg2 |
| 357 | + PTR_L a3, s_arg3 |
| 358 | + PTR_L s1, kexec_start_address |
| 359 | + |
| 360 | + /* Non-relocated address works for args and kexec_start_address ( old |
| 361 | + * kernel is not overwritten). But we need relocated address of |
| 362 | + * kexec_flag. |
| 363 | + */ |
| 364 | + |
| 365 | + bal 1f |
| 366 | +1: move t1,ra; |
| 367 | + PTR_LA t2,1b |
| 368 | + PTR_LA t0,kexec_flag |
| 369 | + PTR_SUB t0,t0,t2; |
| 370 | + PTR_ADD t0,t1,t0; |
| 371 | + |
| 372 | +1: LONG_L s0, (t0) |
| 373 | + bne s0, zero,1b |
| 374 | + |
| 375 | + sync |
| 376 | + j s1 |
| 377 | + END(kexec_smp_wait) |
| 378 | +#endif |
| 379 | + |
| 380 | +#ifdef __mips64 |
| 381 | + /* all PTR's must be aligned to 8 byte in 64-bit mode */ |
| 382 | + .align 3 |
| 383 | +#endif |
| 384 | + |
| 385 | +/* All parameters to new kernel are passed in registers a0-a3. |
| 386 | + * kexec_args[0..3] are uses to prepare register values. |
| 387 | + */ |
| 388 | + |
| 389 | +EXPORT(kexec_args) |
| 390 | +arg0: PTR 0x0 |
| 391 | +arg1: PTR 0x0 |
| 392 | +arg2: PTR 0x0 |
| 393 | +arg3: PTR 0x0 |
| 394 | + .size kexec_args,PTRSIZE*4 |
| 395 | + |
| 396 | +#ifdef CONFIG_SMP |
| 397 | +/* |
| 398 | + * Secondary CPUs may have different kernel parameters in |
| 399 | + * their registers a0-a3. secondary_kexec_args[0..3] are used |
| 400 | + * to prepare register values. |
| 401 | + */ |
| 402 | +EXPORT(secondary_kexec_args) |
| 403 | +s_arg0: PTR 0x0 |
| 404 | +s_arg1: PTR 0x0 |
| 405 | +s_arg2: PTR 0x0 |
| 406 | +s_arg3: PTR 0x0 |
| 407 | + .size secondary_kexec_args,PTRSIZE*4 |
| 408 | +kexec_flag: |
| 409 | + LONG 0x1 |
| 410 | + |
| 411 | +#endif |
| 412 | + |
| 413 | +EXPORT(kexec_start_address) |
| 414 | PTR 0x0 |
| 415 | .size kexec_start_address, PTRSIZE |
| 416 | |
| 417 | -kexec_indirection_page: |
| 418 | - EXPORT(kexec_indirection_page) |
| 419 | +EXPORT(kexec_indirection_page) |
| 420 | PTR 0 |
| 421 | .size kexec_indirection_page, PTRSIZE |
| 422 | |
| 423 | relocate_new_kernel_end: |
| 424 | |
| 425 | -relocate_new_kernel_size: |
| 426 | - EXPORT(relocate_new_kernel_size) |
| 427 | +EXPORT(relocate_new_kernel_size) |
| 428 | PTR relocate_new_kernel_end - relocate_new_kernel |
| 429 | .size relocate_new_kernel_size, PTRSIZE |
| 430 | --- a/arch/mips/kernel/setup.c |
| 431 | +++ b/arch/mips/kernel/setup.c |
| 432 | @@ -21,6 +21,7 @@ |
| 433 | #include <linux/console.h> |
| 434 | #include <linux/pfn.h> |
| 435 | #include <linux/debugfs.h> |
| 436 | +#include <linux/kexec.h> |
| 437 | |
| 438 | #include <asm/addrspace.h> |
| 439 | #include <asm/bootinfo.h> |
| 440 | @@ -488,12 +489,62 @@ static void __init arch_mem_init(char ** |
| 441 | } |
| 442 | |
| 443 | bootmem_init(); |
| 444 | +#ifdef CONFIG_KEXEC |
| 445 | + if (crashk_res.start != crashk_res.end) |
| 446 | + reserve_bootmem(crashk_res.start, |
| 447 | + crashk_res.end - crashk_res.start + 1, |
| 448 | + BOOTMEM_DEFAULT); |
| 449 | +#endif |
| 450 | device_tree_init(); |
| 451 | sparse_init(); |
| 452 | plat_swiotlb_setup(); |
| 453 | paging_init(); |
| 454 | } |
| 455 | |
| 456 | +#ifdef CONFIG_KEXEC |
| 457 | +static inline unsigned long long get_total_mem(void) |
| 458 | +{ |
| 459 | + unsigned long long total; |
| 460 | + total = max_pfn - min_low_pfn; |
| 461 | + return total << PAGE_SHIFT; |
| 462 | +} |
| 463 | + |
| 464 | +static void __init mips_parse_crashkernel(void) |
| 465 | +{ |
| 466 | + unsigned long long total_mem; |
| 467 | + unsigned long long crash_size, crash_base; |
| 468 | + int ret; |
| 469 | + |
| 470 | + total_mem = get_total_mem(); |
| 471 | + ret = parse_crashkernel(boot_command_line, total_mem, |
| 472 | + &crash_size, &crash_base); |
| 473 | + if (ret != 0 || crash_size <= 0) |
| 474 | + return; |
| 475 | + |
| 476 | + crashk_res.start = crash_base; |
| 477 | + crashk_res.end = crash_base + crash_size - 1; |
| 478 | +} |
| 479 | +static void __init request_crashkernel(struct resource *res) |
| 480 | +{ |
| 481 | + int ret; |
| 482 | + |
| 483 | + ret = request_resource(res, &crashk_res); |
| 484 | + if (!ret) |
| 485 | + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " |
| 486 | + "for crashkernel\n", |
| 487 | + (unsigned long)((crashk_res.end - |
| 488 | + crashk_res.start + 1) >> 20), |
| 489 | + (unsigned long)(crashk_res.start >> 20)); |
| 490 | +} |
| 491 | +#else |
| 492 | +static void __init mips_parse_crashkernel(void) |
| 493 | +{ |
| 494 | +} |
| 495 | +static void __init request_crashkernel(struct resource *res) |
| 496 | +{ |
| 497 | +} |
| 498 | +#endif |
| 499 | + |
| 500 | static void __init resource_init(void) |
| 501 | { |
| 502 | int i; |
| 503 | @@ -509,6 +560,8 @@ static void __init resource_init(void) |
| 504 | /* |
| 505 | * Request address space for all standard RAM. |
| 506 | */ |
| 507 | + mips_parse_crashkernel(); |
| 508 | + |
| 509 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
| 510 | struct resource *res; |
| 511 | unsigned long start, end; |
| 512 | @@ -544,6 +597,7 @@ static void __init resource_init(void) |
| 513 | */ |
| 514 | request_resource(res, &code_resource); |
| 515 | request_resource(res, &data_resource); |
| 516 | + request_crashkernel(res); |
| 517 | } |
| 518 | } |
| 519 | |
| 520 | --- a/arch/mips/kernel/smp.c |
| 521 | +++ b/arch/mips/kernel/smp.c |
| 522 | @@ -433,3 +433,21 @@ void flush_tlb_one(unsigned long vaddr) |
| 523 | |
| 524 | EXPORT_SYMBOL(flush_tlb_page); |
| 525 | EXPORT_SYMBOL(flush_tlb_one); |
| 526 | + |
| 527 | +#if defined(CONFIG_KEXEC) |
| 528 | +void (*dump_ipi_function_ptr)(void *) = NULL; |
| 529 | +void dump_send_ipi(void (*dump_ipi_callback)(void *)) |
| 530 | +{ |
| 531 | + int i; |
| 532 | + int cpu = smp_processor_id(); |
| 533 | + |
| 534 | + dump_ipi_function_ptr = dump_ipi_callback; |
| 535 | + smp_mb(); |
| 536 | + for_each_online_cpu(i) |
| 537 | + if (i != cpu) |
| 538 | + core_send_ipi(i, SMP_DUMP); |
| 539 | + |
| 540 | +} |
| 541 | +EXPORT_SYMBOL(dump_send_ipi); |
| 542 | +#endif |
| 543 | + |
| 544 | --- a/arch/mips/include/asm/kexec.h |
| 545 | +++ b/arch/mips/include/asm/kexec.h |
| 546 | @@ -9,22 +9,45 @@ |
| 547 | #ifndef _MIPS_KEXEC |
| 548 | # define _MIPS_KEXEC |
| 549 | |
| 550 | +#include <asm/stacktrace.h> |
| 551 | + |
| 552 | +extern unsigned long long elfcorehdr_addr; |
| 553 | + |
| 554 | /* Maximum physical address we can use pages from */ |
| 555 | #define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) |
| 556 | /* Maximum address we can reach in physical address mode */ |
| 557 | #define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) |
| 558 | /* Maximum address we can use for the control code buffer */ |
| 559 | #define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) |
| 560 | - |
| 561 | -#define KEXEC_CONTROL_PAGE_SIZE 4096 |
| 562 | +/* Reserve 3*4096 bytes for board-specific info */ |
| 563 | +#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096) |
| 564 | |
| 565 | /* The native architecture */ |
| 566 | #define KEXEC_ARCH KEXEC_ARCH_MIPS |
| 567 | +#define MAX_NOTE_BYTES 1024 |
| 568 | |
| 569 | static inline void crash_setup_regs(struct pt_regs *newregs, |
| 570 | - struct pt_regs *oldregs) |
| 571 | + struct pt_regs *oldregs) |
| 572 | { |
| 573 | - /* Dummy implementation for now */ |
| 574 | + if (oldregs) |
| 575 | + memcpy(newregs, oldregs, sizeof(*newregs)); |
| 576 | + else |
| 577 | + prepare_frametrace(newregs); |
| 578 | } |
| 579 | |
| 580 | +#ifdef CONFIG_KEXEC |
| 581 | +struct kimage; |
| 582 | +extern unsigned long kexec_args[4]; |
| 583 | +extern int (*_machine_kexec_prepare)(struct kimage *); |
| 584 | +extern void (*_machine_kexec_shutdown)(void); |
| 585 | +extern void (*_machine_crash_shutdown)(struct pt_regs *regs); |
| 586 | +extern void default_machine_crash_shutdown(struct pt_regs *regs); |
| 587 | +#ifdef CONFIG_SMP |
| 588 | +extern const unsigned char kexec_smp_wait[]; |
| 589 | +extern unsigned long secondary_kexec_args[4]; |
| 590 | +extern void (*relocated_kexec_smp_wait) (void *); |
| 591 | +extern atomic_t kexec_ready_to_reboot; |
| 592 | +#endif |
| 593 | +#endif |
| 594 | + |
| 595 | #endif /* !_MIPS_KEXEC */ |
| 596 | --- a/arch/mips/include/asm/smp.h |
| 597 | +++ b/arch/mips/include/asm/smp.h |
| 598 | @@ -40,6 +40,8 @@ extern int __cpu_logical_map[NR_CPUS]; |
| 599 | #define SMP_CALL_FUNCTION 0x2 |
| 600 | /* Octeon - Tell another core to flush its icache */ |
| 601 | #define SMP_ICACHE_FLUSH 0x4 |
| 602 | +/* Used by kexec crashdump to save all cpu's state */ |
| 603 | +#define SMP_DUMP 0x8 |
| 604 | |
| 605 | extern volatile cpumask_t cpu_callin_map; |
| 606 | |
| 607 | @@ -91,4 +93,9 @@ static inline void arch_send_call_functi |
| 608 | mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); |
| 609 | } |
| 610 | |
| 611 | +extern void core_send_ipi(int cpu, unsigned int action); |
| 612 | +#if defined(CONFIG_KEXEC) |
| 613 | +extern void (*dump_ipi_function_ptr)(void *); |
| 614 | +void dump_send_ipi(void (*dump_ipi_callback)(void *)); |
| 615 | +#endif |
| 616 | #endif /* __ASM_SMP_H */ |
| 617 | |