/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kernel.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/sysreg.h>

#include <mach/board.h>
#include <mach/init.h>

extern int root_mountflags;

/*
 * Initialize loops_per_jiffy as 5000000 (500MIPS).
 * Better make it too large than too small...
 */
struct avr32_cpuinfo boot_cpu_data = {
        .loops_per_jiffy = 5000000
};
EXPORT_SYMBOL(boot_cpu_data);

static char __initdata command_line[COMMAND_LINE_SIZE];

/*
 * Standard memory resources
 */
static struct resource __initdata kernel_data = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_MEM,
};
static struct resource __initdata kernel_code = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_MEM,
        .sibling = &kernel_data,
};

/*
 * Available system RAM and reserved regions as singly linked
 * lists. These lists are traversed using the sibling pointer in
 * struct resource and are kept sorted at all times.
 */
static struct resource *__initdata system_ram;
static struct resource *__initdata reserved = &kernel_code;
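
/*
 * For illustration only (the addresses are made up, not taken from
 * this file): after the boot tags have been parsed on a board with a
 * single 32 MiB SDRAM bank and an initrd, the two lists could look
 * like
 *
 *   system_ram: "System RAM"  0x10000000-0x11ffffff
 *   reserved:   "Kernel code" -> "Kernel data" -> "initrd"
 *
 * each linked through the sibling pointer and sorted by start
 * address.
 */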

/*
 * We need to allocate these before the bootmem allocator is up and
 * running, so we need this "cache". 32 entries are probably enough
 * for all but the most insanely complex systems.
 */
static struct resource __initdata res_cache[32];
static unsigned int __initdata res_cache_next_free;

static void __init resource_init(void)
{
        struct resource *mem, *res;
        struct resource *new;

        kernel_code.start = __pa(init_mm.start_code);

        for (mem = system_ram; mem; mem = mem->sibling) {
                new = alloc_bootmem_low(sizeof(struct resource));
                memcpy(new, mem, sizeof(struct resource));

                new->sibling = NULL;
                if (request_resource(&iomem_resource, new))
                        printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
                               mem->start, mem->end);
        }

        for (res = reserved; res; res = res->sibling) {
                new = alloc_bootmem_low(sizeof(struct resource));
                memcpy(new, res, sizeof(struct resource));

                new->sibling = NULL;
                if (insert_resource(&iomem_resource, new))
                        printk(KERN_WARNING
                               "Bad reserved resource %s (%08x-%08x)\n",
                               res->name, res->start, res->end);
        }
}

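/*
 * Add a bank of RAM to the sorted system_ram list. Entries come from
 * the static res_cache[] array since the bootmem allocator is not
 * available this early; a bank that overlaps an existing one is
 * dropped with a warning.
 */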
static void __init
add_physical_memory(resource_size_t start, resource_size_t end)
{
        struct resource *new, *next, **pprev;

        for (pprev = &system_ram, next = system_ram; next;
             pprev = &next->sibling, next = next->sibling) {
                if (end < next->start)
                        break;
                if (start <= next->end) {
                        printk(KERN_WARNING
                               "Warning: Physical memory map is broken\n");
                        printk(KERN_WARNING
                               "Warning: %08x-%08x overlaps %08x-%08x\n",
                               start, end, next->start, next->end);
                        return;
                }
        }

        if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
                printk(KERN_WARNING
                       "Warning: Failed to add physical memory %08x-%08x\n",
                       start, end);
                return;
        }

        new = &res_cache[res_cache_next_free++];
        new->start = start;
        new->end = end;
        new->name = "System RAM";
        /* Keep the tail of the list when inserting in front of "next"
         * (next is NULL when appending at the end). */
        new->sibling = next;
        new->flags = IORESOURCE_MEM;

        *pprev = new;
}

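/*
 * Insert a region into the sorted reserved list, also backed by
 * res_cache[]. Returns -EINVAL if the region has a negative size,
 * -ENOMEM if the cache is exhausted, and -EBUSY if it overlaps an
 * existing reservation.
 */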
static int __init
add_reserved_region(resource_size_t start, resource_size_t end,
                    const char *name)
{
        struct resource *new, *next, **pprev;

        if (end < start)
                return -EINVAL;

        if (res_cache_next_free >= ARRAY_SIZE(res_cache))
                return -ENOMEM;

        for (pprev = &reserved, next = reserved; next;
             pprev = &next->sibling, next = next->sibling) {
                if (end < next->start)
                        break;
                if (start <= next->end)
                        return -EBUSY;
        }

        new = &res_cache[res_cache_next_free++];
        new->start = start;
        new->end = end;
        new->name = name;
        new->sibling = next;
        new->flags = IORESOURCE_MEM;

        *pprev = new;

        return 0;
}

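/*
 * Find the lowest suitably aligned address inside "mem" where "size"
 * bytes fit without touching any reserved region. Returns
 * mem->end + 1 when no such spot exists, which is the condition the
 * callers check for.
 */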
static unsigned long __init
find_free_region(const struct resource *mem, resource_size_t size,
                 resource_size_t align)
{
        struct resource *res;
        unsigned long target;

        target = ALIGN(mem->start, align);
        for (res = reserved; res; res = res->sibling) {
                if ((target + size) <= res->start)
                        break;
                if (target <= res->end)
                        target = ALIGN(res->end + 1, align);
        }

        if ((target + size) > (mem->end + 1))
                return mem->end + 1;

        return target;
}

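/*
 * Reserve "size" bytes with the given alignment from the first RAM
 * bank that has room, record the region under "name" and return its
 * start address through *start. Returns 0 on success or a negative
 * error code (-ENOMEM when no bank has room) on failure.
 */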
static int __init
alloc_reserved_region(resource_size_t *start, resource_size_t size,
                      resource_size_t align, const char *name)
{
        struct resource *mem;
        resource_size_t target;
        int ret;

        for (mem = system_ram; mem; mem = mem->sibling) {
                target = find_free_region(mem, size, align);
                if (target <= mem->end) {
                        ret = add_reserved_region(target, target + size - 1,
                                                  name);
                        if (!ret)
                                *start = target;
                        return ret;
                }
        }

        return -ENOMEM;
}

/*
 * Early framebuffer allocation. Works as follows:
 * - If fbmem_size is zero, nothing will be allocated or reserved.
 * - If fbmem_start is zero when setup_bootmem() is called,
 *   a block of fbmem_size bytes will be reserved before bootmem
 *   initialization. It will be aligned to the largest page size
 *   that fbmem_size is a multiple of.
 * - If fbmem_start is nonzero, an area of size fbmem_size will be
 *   reserved at the physical address fbmem_start if possible. If
 *   it collides with other reserved memory, a different block of
 *   the same size will be allocated, just as if fbmem_start was zero.
 *
 * Board-specific code may use these variables to set up platform data
 * for the framebuffer driver if fbmem_size is nonzero.
 */
resource_size_t __initdata fbmem_start;
resource_size_t __initdata fbmem_size;

/*
 * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
 * use as framebuffer.
 *
 * "fbmem=xxx[kKmM]@yyy[kKmM]" defines a memory region of size xxx and
 * starting at yyy to be reserved for use as framebuffer.
 *
 * The kernel won't verify that the memory region starting at yyy
 * actually contains usable RAM.
 */
static int __init early_parse_fbmem(char *p)
{
        int ret;
        unsigned long align;

        fbmem_size = memparse(p, &p);
        if (*p == '@') {
                fbmem_start = memparse(p + 1, &p);
                ret = add_reserved_region(fbmem_start,
                                          fbmem_start + fbmem_size - 1,
                                          "Framebuffer");
                if (ret) {
                        printk(KERN_WARNING
                               "Failed to reserve framebuffer memory\n");
                        fbmem_start = 0;
                }
        }

        if (!fbmem_start) {
                if ((fbmem_size & 0x000fffffUL) == 0)
                        align = 0x100000;       /* 1 MiB */
                else if ((fbmem_size & 0x0000ffffUL) == 0)
                        align = 0x10000;        /* 64 KiB */
                else
                        align = 0x1000;         /* 4 KiB */

                ret = alloc_reserved_region(&fbmem_start, fbmem_size,
                                            align, "Framebuffer");
                if (ret) {
                        printk(KERN_WARNING
                               "Failed to allocate framebuffer memory\n");
                        fbmem_size = 0;
                } else {
                        memset(__va(fbmem_start), 0, fbmem_size);
                }
        }

        return 0;
}
early_param("fbmem", early_parse_fbmem);
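
/*
 * For illustration only (the values are made up): "fbmem=600k"
 * reserves a 600 KiB block wherever there is room, 4 KiB aligned
 * since 600 KiB is not a multiple of 64 KiB or 1 MiB, while
 * "fbmem=1M@0x11e00000" tries to pin the framebuffer at physical
 * address 0x11e00000 and falls back to automatic placement only if
 * that range is already reserved.
 */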

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMmGg]"
 */
static int __init early_mem(char *p)
{
        resource_size_t size, start;

        start = system_ram->start;
        size = memparse(p, &p);
        if (*p == '@')
                start = memparse(p + 1, &p);

        system_ram->start = start;
        system_ram->end = system_ram->start + size - 1;
        return 0;
}
early_param("mem", early_mem);
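
/*
 * For illustration only (the values are hypothetical): "mem=16M"
 * overrides the size of the first RAM bank reported by the boot
 * loader, and "mem=16M@0x10000000" overrides its start address as
 * well. Only the head of the system_ram list is modified; any
 * further banks are left exactly as the boot loader described them.
 */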

static int __init parse_tag_core(struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = new_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}
__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem(struct tag *tag)
{
        unsigned long start, end;

        /*
         * Ignore zero-sized entries. If we're running standalone, the
         * SDRAM code may emit such entries if something goes
         * wrong...
         */
        if (tag->u.mem_range.size == 0)
                return 0;

        start = tag->u.mem_range.addr;
        end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;

        add_physical_memory(start, end);
        return 0;
}
__tagtable(ATAG_MEM, parse_tag_mem);

static int __init parse_tag_rdimg(struct tag *tag)
{
#ifdef CONFIG_BLK_DEV_INITRD
        struct tag_mem_range *mem = &tag->u.mem_range;
        int ret;

        if (initrd_start) {
                printk(KERN_WARNING
                       "Warning: Only the first initrd image will be used\n");
                return 0;
        }

        ret = add_reserved_region(mem->addr, mem->addr + mem->size - 1,
                                  "initrd");
        if (ret) {
                printk(KERN_WARNING
                       "Warning: Failed to reserve initrd memory\n");
                return ret;
        }

        initrd_start = (unsigned long)__va(mem->addr);
        initrd_end = initrd_start + mem->size;
#else
        printk(KERN_WARNING "RAM disk image present, but "
               "no initrd support in kernel, ignoring\n");
#endif

        return 0;
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);

static int __init parse_tag_rsvd_mem(struct tag *tag)
{
        struct tag_mem_range *mem = &tag->u.mem_range;

        return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
                                   "Reserved");
}
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);

static int __init parse_tag_cmdline(struct tag *tag)
{
        strlcpy(boot_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

static int __init parse_tag_clock(struct tag *tag)
{
        /*
         * We'll figure out the clocks by peeking at the system
         * manager regs directly.
         */
        return 0;
}
__tagtable(ATAG_CLOCK, parse_tag_clock);

/*
 * board_number corresponds to bd->bi_board_number in U-Boot. It is
 * only available during initialisation and can be used for board
 * identification.
 */
u32 __initdata board_number;

static int __init parse_tag_boardinfo(struct tag *tag)
{
        board_number = tag->u.boardinfo.board_number;

        return 0;
}
__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);

/*
 * Scan the tag table for this tag, and call its parse function. The
 * tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list we got from the boot loader
 */
static void __init parse_tags(struct tag *t)
{
        for (; t->hdr.tag != ATAG_NONE; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                               "Ignoring unrecognised tag 0x%08x\n",
                               t->hdr.tag);
}

/*
 * Find a free memory region large enough for storing the
 * bootmem bitmap.
 */
static unsigned long __init
find_bootmap_pfn(const struct resource *mem)
{
        unsigned long bootmap_pages, bootmap_len;
        unsigned long node_pages = PFN_UP(resource_size(mem));
        unsigned long bootmap_start;

        bootmap_pages = bootmem_bootmap_pages(node_pages);
        bootmap_len = bootmap_pages << PAGE_SHIFT;

        /*
         * Find a large enough region without reserved pages for
         * storing the bootmem bitmap. We can take advantage of the
         * fact that all lists have been sorted.
         *
         * We have to check that we don't collide with any reserved
         * regions, which includes the kernel image and any RAMDISK
         * images.
         */
        bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);

        return bootmap_start >> PAGE_SHIFT;
}
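
/*
 * Rough numbers for orientation (assuming the usual 4 KiB page size,
 * which this file does not spell out): a 32 MiB bank spans 8192
 * pages, so its bootmem bitmap needs 8192 bits = 1 KiB, which
 * bootmem_bootmap_pages() rounds up to a single page; even a 256 MiB
 * bank needs only an 8 KiB, i.e. two-page, bitmap.
 */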

#define MAX_LOWMEM      HIGHMEM_START
#define MAX_LOWMEM_PFN  PFN_DOWN(MAX_LOWMEM)

static void __init setup_bootmem(void)
{
        unsigned bootmap_size;
        unsigned long first_pfn, bootmap_pfn, pages;
        unsigned long max_pfn, max_low_pfn;
        unsigned node = 0;
        struct resource *res;

        printk(KERN_INFO "Physical memory:\n");
        for (res = system_ram; res; res = res->sibling)
                printk(" %08x-%08x\n", res->start, res->end);
        printk(KERN_INFO "Reserved memory:\n");
        for (res = reserved; res; res = res->sibling)
                printk(" %08x-%08x: %s\n",
                       res->start, res->end, res->name);

        nodes_clear(node_online_map);

        if (system_ram->sibling)
                printk(KERN_WARNING "Only using first memory bank\n");

        for (res = system_ram; res; res = NULL) {
                first_pfn = PFN_UP(res->start);
                max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
                bootmap_pfn = find_bootmap_pfn(res);
                if (bootmap_pfn > max_pfn)
                        panic("No space for bootmem bitmap!\n");

                if (max_low_pfn > MAX_LOWMEM_PFN) {
                        max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
                        /*
                         * Lowmem is memory that can be addressed
                         * directly through P1/P2
                         */
                        printk(KERN_WARNING
                               "Node %u: Only %ld MiB of memory will be used.\n",
                               node, MAX_LOWMEM >> 20);
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
                }

                /* Initialize the boot-time allocator with low memory only. */
                bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
                                                 first_pfn, max_low_pfn);

                /*
                 * Register fully available RAM pages with the bootmem
                 * allocator.
                 */
                pages = max_low_pfn - first_pfn;
                free_bootmem_node(NODE_DATA(node), PFN_PHYS(first_pfn),
                                  PFN_PHYS(pages));

                /* Reserve space for the bootmem bitmap... */
                reserve_bootmem_node(NODE_DATA(node),
                                     PFN_PHYS(bootmap_pfn),
                                     bootmap_size,
                                     BOOTMEM_DEFAULT);

                /* ...and any other reserved regions. */
                for (res = reserved; res; res = res->sibling) {
                        if (res->start > PFN_PHYS(max_pfn))
                                break;

                        /*
                         * resource_init will complain about partial
                         * overlaps, so we'll just ignore such
                         * resources for now.
                         */
                        if (res->start >= PFN_PHYS(first_pfn)
                            && res->end < PFN_PHYS(max_pfn))
                                reserve_bootmem_node(NODE_DATA(node),
                                                     res->start,
                                                     resource_size(res),
                                                     BOOTMEM_DEFAULT);
                }

                node_set_online(node);
        }
}

void __init setup_arch(char **cmdline_p)
{
        struct clk *cpu_clk;

        init_mm.start_code = (unsigned long)_stext;
        init_mm.end_code = (unsigned long)_etext;
        init_mm.end_data = (unsigned long)_edata;
        init_mm.brk = (unsigned long)_end;

        /*
         * Include .init section to make allocations easier. It will
         * be removed before the resource is actually requested.
         */
        kernel_code.start = __pa(__init_begin);
        kernel_code.end = __pa(init_mm.end_code - 1);
        kernel_data.start = __pa(init_mm.end_code);
        kernel_data.end = __pa(init_mm.brk - 1);

        parse_tags(bootloader_tags);

        setup_processor();
        setup_platform();
        setup_board();

        cpu_clk = clk_get(NULL, "cpu");
        if (IS_ERR(cpu_clk)) {
                printk(KERN_WARNING "Warning: Unable to get CPU clock\n");
        } else {
                unsigned long cpu_hz = clk_get_rate(cpu_clk);

                /*
                 * Well, duh, but it's probably a good idea to
                 * increment the use count.
                 */
                clk_enable(cpu_clk);

                boot_cpu_data.clk = cpu_clk;
                boot_cpu_data.loops_per_jiffy = cpu_hz * 4;
                printk("CPU: Running at %lu.%03lu MHz\n",
                       ((cpu_hz + 500) / 1000) / 1000,
                       ((cpu_hz + 500) / 1000) % 1000);
        }

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
        parse_early_param();

        setup_bootmem();

#ifdef CONFIG_VT
        conswitchp = &dummy_con;
#endif

        paging_init();
        resource_init();
}