/*
 * fs/proc/vmcore.c Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
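
/*
 * Example (illustrative only): a hypothetical hypervisor backend, here
 * called "foo_balloon", could hook pfn_is_ram() as sketched below,
 * returning 1 for backed pages and 0 for ballooned pages.
 * foo_balloon_page_backed() is an assumed helper, not a real API:
 *
 *	static int foo_balloon_pfn_is_ram(unsigned long pfn)
 *	{
 *		return foo_balloon_page_backed(pfn) ? 1 : 0;
 *	}
 *
 *	static int __init foo_balloon_init(void)
 *	{
 *		return register_oldmem_pfn_is_ram(&foo_balloon_pfn_is_ram);
 *	}
 *
 *	static void __exit foo_balloon_exit(void)
 *	{
 *		unregister_oldmem_pfn_is_ram();
 *	}
 */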

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
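
/*
 * Worked example (assuming PAGE_SIZE == 4096): a read of 5000 bytes at
 * *ppos == 0x10ff0 starts at pfn 0x10, offset 0xff0.  The first pass
 * copies PAGE_SIZE - 0xff0 = 16 bytes, then offset is reset to 0 and
 * the remainder is copied page by page: 4096 bytes from pfn 0x11 and
 * the final 888 bytes from pfn 0x12.
 */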

/* Maps vmcore file offset to respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
			       struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}
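
/*
 * Example: for a chunk with offset 0x2000, paddr 0x100000 and size
 * 0x4000, a file offset of 0x2400 falls inside [0x2000, 0x5fff] and
 * maps to physical address 0x100000 + (0x2400 - 0x2000) = 0x100400.
 */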

/* Read from the ELF header and then the crash dump. Returns number of bytes
 * read on success, a negative value on error.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate left bytes in current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
					    struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}
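
/*
 * From userspace the dump is captured by reading this file sequentially,
 * e.g. "cp /proc/vmcore /var/crash/vmcore", or selectively with a tool
 * such as makedumpfile that parses the same ELF headers.
 */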

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}
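
/*
 * Example: a 64-bit dump with two program headers (a merged note segment
 * of 0x1000 bytes and one 512 MiB PT_LOAD segment) has size
 * sizeof(Elf64_Ehdr) + 2 * sizeof(Elf64_Phdr) + 0x1000 + 0x20000000
 * = 64 + 112 + 4096 + 536870912 bytes.
 */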

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
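
/*
 * Each ELF note record is an Elf64_Nhdr followed by the name and the
 * descriptor, each padded to a 4-byte boundary; that is what the
 * (n_namesz + 3) & ~3 rounding above computes.  For example, a note
 * with n_namesz = 5 and n_descsz = 336 occupies
 * sizeof(Elf64_Nhdr) + 8 + 336 = 356 bytes.
 */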

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}
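
/*
 * Example: with a merged note segment of 0x1000 bytes and two PT_LOAD
 * segments (e_phnum == 3), the first PT_LOAD's p_offset is rewritten to
 * sizeof(Elf64_Ehdr) + 3 * sizeof(Elf64_Phdr) + 0x1000, and the second
 * PT_LOAD follows at that offset plus the first segment's p_memsz.
 */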

static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
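
/*
 * After the headers are rewritten and the offsets assigned, the exported
 * file has this layout (sketch):
 *
 *	+------------------------+ offset 0
 *	| ELF header             |
 *	+------------------------+
 *	| merged PT_NOTE phdr    |
 *	| PT_LOAD phdrs ...      |
 *	+------------------------+
 *	| merged note data       |
 *	+------------------------+
 *	| PT_LOAD memory chunk 0 |
 *	| PT_LOAD memory chunk 1 |
 *	| ...                    |
 *	+------------------------+
 */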

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
					" not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}
	return 0;
}
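
/*
 * The "elfcorehdr=" parameter points at the ELF core header that the
 * first kernel's kexec setup placed in memory before the crash; the
 * address below is illustrative only:
 *
 *	... ro root=/dev/sda1 elfcorehdr=0x1000000
 *
 * is_vmcore_usable() succeeds only when such a valid address was passed
 * on the capture kernel's command line.
 */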

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump. */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)