Source at commit 694c7fbe86b8a9c91392e505afcb9fcfc91deccc (Maarten ter Huurne, "MIPS: JZ4740: Add cpufreq support").
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags);\
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
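/*
 * ENTER_CRITICAL()/EXIT_CRITICAL() must be used as a matched pair within a
 * single function: on SMTC the first macro opens a block, saves the local
 * IRQ state and halts the other VPEs with dvpe(), and the second restarts
 * them with evpe() and closes the block.  Without SMTC they reduce to plain
 * local_irq_save()/local_irq_restore().
 */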
/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif
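/*
 * kmap_coherent() maps a user page at a kernel virtual address whose cache
 * colour matches the user-space address @addr, so that accesses through the
 * mapping hit the same cache lines as the user mapping on virtually
 * aliasing D-caches.  The mapping is installed directly as a TLB entry
 * (wired, or preloaded on SMTC) for a per-CPU fixmap slot and must be
 * released again with kunmap_coherent().
 */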
void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id() +
               (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
        /* preload TLB instead of local_flush_tlb_one() */
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        tlbidx = read_c0_index();
        mtc0_tlbw_hazard();
        if (tlbidx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
#else
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
#endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);

        return (void*) vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
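/*
 * kunmap_coherent() undoes kmap_coherent().  On !SMTC kernels it removes
 * the wired TLB entry again by pointing it at a unique, never-matched
 * KSEG0 address (UNIQUE_ENTRYHI) with both EntryLo values zeroed.  On SMTC
 * nothing has to be torn down: the entry was not wired and will simply be
 * replaced like any other TLB entry.
 */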
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
        unsigned long flags, old_ctx;

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
        pagefault_enable();
}
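/*
 * copy_user_highpage() copies one page on behalf of a user mapping at
 * @vaddr.  When the D-cache is virtually aliasing and the source page is
 * mapped in user space with a clean D-cache, the source is read through
 * kmap_coherent() so the copy sees exactly what the user mapping sees;
 * otherwise a plain kmap_atomic() is sufficient.  The destination is
 * flushed afterwards unless the I-cache can fill from the D-cache and the
 * kernel alias cannot conflict with the user address.
 */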
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
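/*
 * copy_to_user_page() and copy_from_user_page() are the hooks used when the
 * kernel accesses another task's pages through their kernel mapping (ptrace
 * and friends).  They go through kmap_coherent() for the same aliasing
 * reasons as copy_user_highpage(); when that is not possible the page is
 * marked D-cache dirty so it can be flushed later, and writes into
 * executable mappings flush the page so stale I-cache contents are never
 * executed.
 */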
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
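/*
 * fixrange_init() pre-allocates the page-table pages that back a fixed
 * virtual address range (the fixmap/kmap area used by HIGHMEM and SMTC),
 * so that later set_pte() calls on those addresses never have to allocate
 * memory.  The tables come from the boot memory allocator.
 */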
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
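/*
 * page_is_ram() reports whether a page frame number lies inside one of the
 * usable RAM regions recorded in boot_mem_map by the platform setup code.
 */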
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}
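/*
 * paging_init() builds the kernel page tables and hands the zone limits
 * (DMA/DMA32/NORMAL and, when configured, HIGHMEM) to the core MM via
 * free_area_init_nodes().  Highmem is ignored when the D-cache has aliases,
 * as the warning below explains.
 */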
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn __maybe_unused;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
        kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}
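/*
 * mem_init() hands all remaining bootmem over to the buddy allocator, sets
 * up the zeroed pages, releases usable highmem pages, and prints the usual
 * memory summary.  On 64-bit kernels the CKSEG0 text region is also
 * registered with /proc/kcore.
 */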
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages.  */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
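/*
 * free_init_pages() poisons and releases the whole pages contained in
 * [begin, end) back to the page allocator and reports how much was freed.
 * Partial pages at either end of the range are left alone (PFN_UP/PFN_DOWN).
 */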
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;