/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

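/*
 * Lazily bring the i-cache into coherence with the d-cache: the flush is
 * deferred until a page is first mapped with execute permission, and
 * PG_arch_1 records that the page has already been flushed.
 */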
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

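/*
 * Pick the bottom of the register backing store: take the hard stack
 * rlimit truncated to a 16-byte multiple ("& -16"), cap it at
 * MAX_USER_STACK_SIZE, and place rbs_bot page-aligned that far below the
 * top of the stack.
 */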
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_zalloc() returns NULL, we simply
	 * ignore the problem.  When the process attempts to write to the register
	 * backing store for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
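
	/*
	 * Example: with 16KB pages (PAGE_SHIFT == 14) and 8-byte PTEs
	 * (pte_bits == 3), mapped_space_bits is 3*(14 - 3) + 14 = 47, i.e.
	 * the three-level page table can map 2^47 bytes per region; with
	 * impl_va_bits == 51, vmlpt_bits is 51 - 14 + 3 = 40, so the VMLPT
	 * needs 2^40 bytes of virtual space.
	 */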

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * We need 2^(mapped_space_bits - PAGE_SHIFT) ptes, which must fit into
	 * the 2^(vmlpt_bits - pte_bits) slots of the VMLPT.  The second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
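
	/*
	 * With the example values above (vmlpt_bits == 40), this places the
	 * VMLPT base at 2^61 - 2^40 within the region, encodes size 40 in
	 * bits 2-7, selects the short format (bit 8 clear), and enables the
	 * walker unless CONFIG_DISABLE_VHPT is set.
	 */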

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
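/*
 * Walk the kernel page table that backs the virtual mem_map and return
 * the node-relative index of the next pfn whose struct page is actually
 * mapped, skipping whole PGDIR/PUD/PMD-sized holes at a time.
 */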
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

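/*
 * Allocate the virtual mem_map pages covering [start, end) and wire them
 * into the kernel page table, pulling both the page-table pages and the
 * backing pages from the node-local bootmem allocator.
 */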
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

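/*
 * With a virtual mem_map, a pfn is valid only if its struct page can
 * actually be read: __get_user() probes the entry and fails with -EFAULT
 * instead of faulting when the backing page is unmapped.  The second
 * probe handles a struct page that straddles a page boundary.
 */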
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

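/*
 * Called once per EFI memory descriptor; the static last_end carries the
 * previous descriptor's end across calls, which is why the walk depends
 * on the descriptors arriving in ascending address order.
 */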
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

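/*
 * Record a usable physical range in the early node map, trimming any
 * overlap with the kexec crash-kernel reservation so those pages are
 * never handed to the page allocator.
 */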
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}

static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.  This works because entry points are bundle-aligned
	 * (16 bytes), so bit 0 of a real handler address is always clear.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk(KERN_ERR "%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);