Source at commit 694c7fbe86b8a9c91392e505afcb9fcfc91deccc by Maarten ter Huurne: MIPS: JZ4740: Add cpufreq support

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
        preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        smp_call_function(func, info, 1);
#endif
        func(info);
        preempt_enable();
}
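
/*
 * Note the #if above: on CONFIG_MIPS_MT_SMP / CONFIG_MIPS_MT_SMTC
 * kernels the hardware threads share the primary caches, so the
 * smp_call_function() is compiled out and running func() locally
 * covers everyone; that is the "single shared primary cache" case
 * promised in the comment.
 */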

#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)
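
/*
 * The idea behind the workaround above, as documented with the
 * R4600_V{1,2}_HIT_CACHEOP_WAR switches in <asm/war.h>: on R4600 V2.x
 * a hit cache op is only reliable once outstanding cache/bus activity
 * has drained, which the uncached CKSEG1 load forces; R4600 V1.x
 * instead just gets padded with nops to quiesce dcache activity.
 */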

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache64_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
        else if (dc_lsize == 64)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}
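
/*
 * All of the r4k_blast_*_setup() routines below follow this pattern:
 * probe the line size once at boot and point a function pointer at
 * the matching blast_*() template from <asm/r4kcache.h>, so the hot
 * flush paths pay no per-call line-size branching.
 */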

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
        else if (dc_lsize == 64)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
        else if (dc_lsize == 64)
                r4k_blast_dcache = blast_dcache64;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)                                            \
        __asm__ __volatile__(                                           \
                "b\t1f\n\t"                                             \
                ".align\t" #order "\n\t"                                \
                "1:\n\t"                                                \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}
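
/*
 * The even/odd dance above works around the TX49 indexed-invalidate
 * erratum: cache32_unroll32() covers 32 lines of 32 bytes, i.e. one
 * 1kB chunk per call, and the .align directives pin the loop code
 * itself into a known even or odd 1kB chunk.  Each pass therefore
 * only invalidates chunks the code is not currently executing from.
 */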

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
                r4k_blast_icache_page = loongson2_blast_icache32_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
}


static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else if (current_cpu_type() == CPU_LOONGSON2)
                        r4k_blast_icache_page_indexed =
                                loongson2_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else if (current_cpu_type() == CPU_LOONGSON2)
                        r4k_blast_icache = loongson2_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                /*
                 * These caches are inclusive caches, that is, if something
                 * is not cached in the S-cache, we know it also won't be
                 * in one of the primary caches.
                 */
                r4k_blast_scache();
                break;

        default:
                r4k_blast_dcache();
                r4k_blast_icache();
                break;
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        int i;

        for_each_online_cpu(i)
                if (cpu_context(i, mm))
                        return 1;

        return 0;
#else
        return cpu_context(smp_processor_id(), mm);
#endif
}
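
/*
 * cpu_context() is nonzero once the mm has been handed an ASID on a
 * CPU, i.e. it has actually run there.  An mm that never held a valid
 * ASID anywhere cannot have left anything behind in the caches, so
 * the flush routines below use this check to bail out early.
 */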

static void r4k__flush_cache_vmap(void)
{
        r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
        r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!(has_valid_asid(vma->vm_mm)))
                return;

        r4k_blast_dcache();
        if (exec)
                r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}
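
/*
 * Nothing to do above unless the dcache can alias (stale data under a
 * different virtual index) or, for an executable mapping, the icache
 * does not fill from the dcache and so may hold stale instructions.
 */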

static inline void local_r4k_flush_cache_mm(void * args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
         * we only flush the primary caches, but R10000 and R12000 behave
         * sanely ...  R4000SC and R4400SC indexed S-cache ops also
         * invalidate primary caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        int map_coherent = 0;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (!has_valid_asid(mm))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                map_coherent = (cpu_has_dc_aliases &&
                                page_mapped(page) && !Page_dcache_dirty(page));
                if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
                if (exec && !cpu_icache_snoops_remote_store)
                        r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();

                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
                        r4k_blast_icache_page(addr);
        }

        if (vaddr) {
                if (map_coherent)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr);
        }
}
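
/*
 * When the page belongs to some other mm (or its PTE is no longer
 * valid) it cannot be flushed through the user virtual address, so a
 * temporary kernel mapping is set up instead.  kmap_coherent() is
 * preferred when dcache aliasing is in play because it picks a kernel
 * address with the same cache colour as the user mapping, hitting the
 * very lines that need flushing.
 */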

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }
        }

        if (end - start > icache_size)
                r4k_blast_icache();
        else {
                switch (boot_cpu_type()) {
                case CPU_LOONGSON2:
                        protected_loongson2_blast_icache_range(start, end);
                        break;

                default:
                        protected_blast_icache_range(start, end);
                        break;
                }
        }
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;

        local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;

        r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
        instruction_hazard();
}
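
/*
 * The closing instruction_hazard() is the usual MIPS hazard barrier:
 * it ensures no instructions fetched before the flush are still
 * sitting in the pipeline by the time we return to the new code.
 */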

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                preempt_enable();
                __sync();
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly
         */
        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }
        preempt_enable();

        bc_wback_inv(addr, size);
        __sync();
}
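
/*
 * wback_inv is used before DMA *to* a device: dirty lines must reach
 * memory first.  The plain _inv variant below is for DMA *from* a
 * device, where cached copies merely need to be discarded.  With
 * inclusive primary caches it suffices to operate on the S-cache,
 * since on these CPUs the S-cache ops reach the primaries as well.
 */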

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else {
                        /*
                         * There is no clearly documented alignment
                         * requirement for the cache instruction on MIPS
                         * processors, and some processors, among them the
                         * RM5200 and RM7000 QED processors, will throw an
                         * address error for cache hit ops with insufficient
                         * alignment.  Solved by aligning the address to the
                         * cache line size.
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
                preempt_enable();
                __sync();
                return;
        }

        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }
        preempt_enable();

        bc_inv(addr, size);
        __sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
        unsigned long ic_lsize = cpu_icache_line_size();
        unsigned long dc_lsize = cpu_dcache_line_size();
        unsigned long sc_lsize = cpu_scache_line_size();
        unsigned long addr = (unsigned long) arg;

        R4600_HIT_CACHEOP_WAR_IMPL;
        if (dc_lsize)
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        if (ic_lsize)
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
#ifdef CONFIG_32BIT
                        "la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
                        "dla $at,1f\n\t"
#endif
                        "cache %0,($at)\n\t"
                        "nop; nop; nop\n"
                        "1:\n\t"
                        ".set pop"
                        :
                        : "i" (Hit_Invalidate_I));
        }
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
        unsigned long vaddr;
        int size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
        struct flush_kernel_vmap_range_args *vmra = args;
        unsigned long vaddr = vmra->vaddr;
        int size = vmra->size;

        /*
         * Aliases only affect the primary caches so don't bother with
         * S-caches or T-caches.
         */
        if (cpu_has_safe_index_cacheops && size >= dcache_size)
                r4k_blast_dcache();
        else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(vaddr, vaddr + size);
        }
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        struct flush_kernel_vmap_range_args args;

        args.vaddr = (unsigned long) vaddr;
        args.size = size;

        r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}
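
/*
 * The loop above walks one 4kB way of the RM7000's 16kB, 4-way
 * icache; the 0x1000 offsets hit the same index in the other three
 * ways.  Each index is invalidated (Index_Store_Tag_I with a zero
 * tag), refilled (Fill), then invalidated again, which is what it
 * takes to get the array into a sane state after reset.
 */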

static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
        unsigned int imp = c->processor_id & PRID_IMP_MASK;
        unsigned int rev = c->processor_id & PRID_REV_MASK;

        /*
         * Early versions of the 74K do not update the cache tags on a
         * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
         * aliases.  In this case it is better to treat the cache as always
         * having aliases.
         */
        switch (imp) {
        case PRID_IMP_74K:
                if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
                        c->dcache.flags |= MIPS_CACHE_VTAG;
                if (rev == PRID_REV_ENCODE_332(2, 4, 0))
                        write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
                break;
        case PRID_IMP_1074K:
                if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
                        c->dcache.flags |= MIPS_CACHE_VTAG;
                        write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
                }
                break;
        default:
                BUG();
        }
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        unsigned long config1;
        unsigned int lsize;

        switch (current_cpu_type()) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
                 * the I-cache first ...
                 */
                config1 = read_c0_config1();

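                /*
                 * Per the MIPS32/MIPS64 PRA, Config1 describes each primary
                 * cache with three fields: line size L (2 << L bytes, 0
                 * meaning no cache), associativity A (A + 1 ways) and sets
                 * per way S (64 << S, with S = 7 wrapping back to 32; that
                 * is what "32 << (((config1 >> 22) + 1) & 7)" implements).
                 * The I-cache fields live at bits 24:16, D-cache at 15:7.
                 */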
                if ((lsize = ((config1 >> 19) & 7)))
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
                c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & 0x8)       /* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                if ((lsize = ((config1 >> 10) & 7)))
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz = lsize;
                c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping his hardware in the "bad"
         * configuration.
         */
        if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
            (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're
         * 32kB 2-way virtually indexed, so they would normally suffer from
         * aliases; but magic in the hardware deals with that for us so we
         * don't need to take care ourselves.
         */
        switch (current_cpu_type()) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_XLR:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                break;

        case CPU_M14KC:
        case CPU_M14KEC:
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
                if (current_cpu_type() == CPU_74K)
                        alias_74k_erratum(c);
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
        default:
                if (c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        switch (current_cpu_type()) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_ALCHEMY:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;

        case CPU_LOONGSON2:
                /*
                 * LOONGSON2 has a 4 way icache, but when using indexed
                 * cache ops, one op will act on all 4 ways.
                 */
                c->icache.ways = 1;
        }

        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
               c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
               way_string[c->icache.ways], c->icache.linesz);

        printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
               dcache_size >> 10, way_string[c->dcache.ways],
               (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
               (c->dcache.flags & MIPS_CACHE_ALIASES) ?
                        "cache aliases" : "no aliases",
               c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;

        if (config & CONF_SC)
                return 0;

        begin = (unsigned long) &_stext;
        begin &= ~((4 * 1024 * 1024) - 1);
        end = begin + (4 * 1024 * 1024);

        /*
         * This is such a bitch, you'd think they would make it easy to do
         * this.  Away you daemons of stupidity!
         */
        local_irq_save(flags);

        /* Fill each size-multiple cache line with a valid tag. */
        pow2 = (64 * 1024);
        for (addr = begin; addr < end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
                pow2 <<= 1;
        }

        /* Load first line with zero (therefore invalid) tag. */
        write_c0_taglo(0);
        write_c0_taghi(0);
        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
        cache_op(Index_Store_Tag_I, begin);
        cache_op(Index_Store_Tag_D, begin);
        cache_op(Index_Store_Tag_SD, begin);

        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
        local_irq_restore(flags);
        addr -= begin;

        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
        c->scache.waybit = 0;           /* does not matter */

        return 1;
}
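
/*
 * The sizing trick above: touch memory at power-of-two offsets so each
 * candidate index holds a valid tag, store a zero (invalid) tag at
 * index 0, then read tags back at growing power-of-two offsets.  The
 * first offset whose Index_Load_Tag_SD returns the zero tag has
 * wrapped around to index 0, so that offset is the S-cache size.
 */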

static void __init loongson2_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        scache_size = 512*1024;
        c->scache.linesz = 32;
        c->scache.ways = 4;
        c->scache.waybit = 0;
        c->scache.waysize = scache_size / (c->scache.ways);
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        int sc_present = 0;

        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have a S-cache that would be relevant to the
         * Linux memory management.
         */
        switch (current_cpu_type()) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
                c->scache.waybit = 0;
                sc_present = 1;
                break;

        case CPU_R5000:
        case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
#endif
                return;

        case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
                rm7k_sc_init();
#endif
                return;

        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;

        case CPU_XLP:
                /* don't need to worry about L2, fully coherent */
                return;

        default:
                if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
                                    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
                        }
#else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
                        return;
                }
                sc_present = 0;
        }

        if (!sc_present)
                return;

        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;

        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
        /*
         * c0_config.od (bit 19) was write only (and read as 0) on the
         * early revisions of Alchemy SOCs.  It disables the bus
         * transaction overlapping and needs to be set to fix various
         * errata.
         */
        switch (read_c0_prid()) {
        case 0x00030100: /* Au1000 DA */
        case 0x00030201: /* Au1000 HA */
        case 0x00030202: /* Au1000 HB */
        case 0x01030200: /* Au1500 AB */
        /*
         * The Au1100 errata say nothing about this bit, so we set it
         * just in case for those revisions that require it to be set
         * according to the (now gone) cpu table.
         */
        case 0x02030200: /* Au1100 AB */
        case 0x02030201: /* Au1100 BA */
        case 0x02030202: /* Au1100 BC */
                set_c0_config(1 << 19);
                break;
        }
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()                                                   \
        __asm__ __volatile__(                                           \
        ".set noreorder\n\t"                                            \
        "nop; nop; nop; nop; nop; nop;\n\t"                             \
        ".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
        unsigned long config0;

        config0 = read_c0_config();

        /* clear all three cache coherency fields */
        config0 &= ~(0x7 | (7 << 25) | (7 << 28));
        config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
                    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
                    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
        write_c0_config(config0);
        NXP_BARRIER();
}

static int cca = -1;

static int __init cca_setup(char *str)
{
        get_option(&str, &cca);

        return 0;
}

early_param("cca", cca_setup);
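
/*
 * "cca=N" on the kernel command line forces cache coherency attribute
 * N (0-7) instead of whatever Config.K0 currently holds;
 * coherency_setup() below applies it to both Config.K0 and the
 * default page cachability.
 */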

static void coherency_setup(void)
{
        if (cca < 0 || cca > 7)
                cca = read_c0_config() & CONF_CM_CMASK;
        _page_cachable_default = cca << _CACHE_SHIFT;

        pr_debug("Using cache attribute %d\n", cca);
        change_c0_config(CONF_CM_CMASK, cca);

        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
         * the coherency mode specified by the TLB; 1 means cachable
         * coherent update on write will be used.  Not all processors have
         * this bit; some wire it to zero, others like Toshiba had the
         * silly idea of putting something else there ...
         */
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;
        /*
         * We need to catch the early Alchemy SOCs with
         * the write-only c0_config.od bit and set it back to one on:
         * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
         */
        case CPU_ALCHEMY:
                au1x00_fixup_config_od();
                break;

        case PRID_IMP_PR4450:
                nxp_pr4450_fixup_config();
                break;
        }
}

static void r4k_cache_error_setup(void)
{
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;

        switch (current_cpu_type()) {
        case CPU_SB1:
        case CPU_SB1A:
                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
                break;

        default:
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }
}

void r4k_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        struct cpuinfo_mips *c = &current_cpu_data;

        probe_pcache();
        setup_scache();

        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
        r4k_blast_icache_page_setup();
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();

        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
        if (c->dcache.linesz)
                shm_align_mask = max_t(unsigned long,
                                       c->dcache.sets * c->dcache.linesz - 1,
                                       PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE-1;
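
        /*
         * Rationale for the shm_align_mask choice above: sets * linesz
         * is the size of one dcache way, so aligning shared mappings to
         * it makes every virtual alias of a page index the same cache
         * lines, preventing stale data under a different colour.
         */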

        __flush_cache_vmap = r4k__flush_cache_vmap;
        __flush_cache_vunmap = r4k__flush_cache_vunmap;

        flush_cache_all = cache_noop;
        __flush_cache_all = r4k___flush_cache_all;
        flush_cache_mm = r4k_flush_cache_mm;
        flush_cache_page = r4k_flush_cache_page;
        flush_cache_range = r4k_flush_cache_range;

        __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

        flush_cache_sigtramp = r4k_flush_cache_sigtramp;
        flush_icache_all = r4k_flush_icache_all;
        local_flush_data_cache_page = local_r4k_flush_data_cache_page;
        flush_data_cache_page = r4k_flush_data_cache_page;
        flush_icache_range = r4k_flush_icache_range;
        local_flush_icache_range = local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
        if (coherentio) {
                _dma_cache_wback_inv = (void *)cache_noop;
                _dma_cache_wback = (void *)cache_noop;
                _dma_cache_inv = (void *)cache_noop;
        } else {
                _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
                _dma_cache_wback = r4k_dma_cache_wback_inv;
                _dma_cache_inv = r4k_dma_cache_inv;
        }
#endif

        build_clear_page();
        build_copy_page();

        /*
         * We want to run CMP kernels on cores with and without coherent
         * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
         * or not to flush caches.
         */
        local_r4k___flush_cache_all(NULL);

        coherency_setup();
        board_cache_error_setup = r4k_cache_error_setup;
}