Root/
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/gfp.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <asm/bfin-global.h>
#include <asm/pda.h>
#include <asm/cplbinit.h>
#include <asm/early_printk.h>
#include "blackfin_sram.h"
16 | |
/*
 * ZERO_PAGE is a special page that is used for zero-initialized data and COW.
 * Let the bss do its zero-init magic so we don't have to do it ourselves.
 * Page-aligned so it can be mapped directly as a page frame.
 */
char empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);
23 | |
#ifndef CONFIG_EXCEPTION_L1_SCRATCH
/*
 * Per-CPU exception stacks, used when L1 scratchpad is not reserved for
 * exception handling.  Optionally placed in on-chip L1 data SRAM.
 */
#if defined CONFIG_SYSCALL_TAB_L1
__attribute__((l1_data))
#endif
static unsigned long exception_stack[NR_CPUS][1024];
#endif
30 | |
/* Per-CPU private data areas; exported since EXPORT_SYMBOL makes them
 * visible to modules. */
struct blackfin_pda cpu_pda[NR_CPUS];
EXPORT_SYMBOL(cpu_pda);
33 | |
34 | /* |
35 | * paging_init() continues the virtual memory environment setup which |
36 | * was begun by the code in arch/head.S. |
37 | * The parameters are pointers to where to stick the starting and ending |
38 | * addresses of available kernel virtual memory. |
39 | */ |
40 | void __init paging_init(void) |
41 | { |
42 | /* |
43 | * make sure start_mem is page aligned, otherwise bootmem and |
44 | * page_alloc get different views of the world |
45 | */ |
46 | unsigned long end_mem = memory_end & PAGE_MASK; |
47 | |
48 | unsigned long zones_size[MAX_NR_ZONES] = { |
49 | [0] = 0, |
50 | [ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT, |
51 | [ZONE_NORMAL] = 0, |
52 | #ifdef CONFIG_HIGHMEM |
53 | [ZONE_HIGHMEM] = 0, |
54 | #endif |
55 | }; |
56 | |
57 | /* Set up SFC/DFC registers (user data space) */ |
58 | set_fs(KERNEL_DS); |
59 | |
60 | pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n", |
61 | PAGE_ALIGN(memory_start), end_mem); |
62 | free_area_init(zones_size); |
63 | } |
64 | |
/*
 * init_pda - set up this CPU's private data area very early in boot.
 * Called from assembly (asmlinkage); runs before the memory it points at
 * holds meaningful data.
 */
asmlinkage void __init init_pda(void)
{
	unsigned int cpu = raw_smp_processor_id();

	early_shadow_stamp();

	/* Initialize the PDA fields holding references to other parts
	   of the memory. The content of such memory is still
	   undefined at the time of the call, we are only setting up
	   valid pointers to it. */
	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));

	/* Link the PDAs into a two-entry ring.
	   NOTE(review): this assumes exactly two entries are relevant;
	   confirm NR_CPUS <= 2 on this platform. */
	cpu_pda[0].next = &cpu_pda[1];
	cpu_pda[1].next = &cpu_pda[0];

#ifdef CONFIG_EXCEPTION_L1_SCRATCH
	/* Exception stack at the end of L1 scratchpad SRAM (stack is
	   presumed to grow downward from there — confirm). */
	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
					L1_SCRATCH_LENGTH);
#else
	/* exception_stack[cpu + 1] is one element past this CPU's slot,
	   i.e. the high end of its 1024-word stack. */
	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
#endif

#ifdef CONFIG_SMP
	cpu_pda[cpu].imask = 0x1f;
#endif
}
91 | |
92 | void __init mem_init(void) |
93 | { |
94 | unsigned int codek = 0, datak = 0, initk = 0; |
95 | unsigned int reservedpages = 0, freepages = 0; |
96 | unsigned long tmp; |
97 | unsigned long start_mem = memory_start; |
98 | unsigned long end_mem = memory_end; |
99 | |
100 | end_mem &= PAGE_MASK; |
101 | high_memory = (void *)end_mem; |
102 | |
103 | start_mem = PAGE_ALIGN(start_mem); |
104 | max_mapnr = num_physpages = MAP_NR(high_memory); |
105 | printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); |
106 | |
107 | /* This will put all memory onto the freelists. */ |
108 | totalram_pages = free_all_bootmem(); |
109 | |
110 | reservedpages = 0; |
111 | for (tmp = 0; tmp < max_mapnr; tmp++) |
112 | if (PageReserved(pfn_to_page(tmp))) |
113 | reservedpages++; |
114 | freepages = max_mapnr - reservedpages; |
115 | |
116 | /* do not count in kernel image between _rambase and _ramstart */ |
117 | reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT; |
118 | #if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) |
119 | reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT; |
120 | #endif |
121 | |
122 | codek = (_etext - _stext) >> 10; |
123 | initk = (__init_end - __init_begin) >> 10; |
124 | datak = ((_ramstart - _rambase) >> 10) - codek - initk; |
125 | |
126 | printk(KERN_INFO |
127 | "Memory available: %luk/%luk RAM, " |
128 | "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n", |
129 | (unsigned long) freepages << (PAGE_SHIFT-10), _ramend >> 10, |
130 | initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); |
131 | } |
132 | |
133 | static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) |
134 | { |
135 | unsigned long addr; |
136 | /* next to check that the page we free is not a partial page */ |
137 | for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) { |
138 | ClearPageReserved(virt_to_page(addr)); |
139 | init_page_count(virt_to_page(addr)); |
140 | free_page(addr); |
141 | totalram_pages++; |
142 | } |
143 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); |
144 | } |
145 | |
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * free_initrd_mem - release the initrd's pages once it is no longer needed.
 * Skipped when the MPU is enabled (the region stays reserved there —
 * presumably because MPU protection tables cover it; confirm).
 */
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
	free_init_pages("initrd memory", start, end);
#endif
}
#endif
154 | |
/*
 * free_initmem - reclaim the kernel's __init text/data after boot.
 * Only meaningful for RAM kernels without an MPU; otherwise a no-op.
 */
void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));

	/* If the managed region began exactly at __init_end, extend it
	   down to __init_begin so the freed init section is covered too. */
	if (memory_start == (unsigned long)(&__init_end))
		memory_start = (unsigned long)(&__init_begin);
#endif
}
166 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9