target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch

--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -100,6 +100,10 @@ END(except_vec1_generic)
 	 * and R4400 SC and MC versions.
 	 */
 	NESTED(except_vec3_generic, 0, sp)
+#ifdef CONFIG_BCM4710
+	nop
+	nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
 	mfc0	k0, CP0_INDEX
 #endif
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -14,6 +14,12 @@
 #include <linux/mm.h>
 #include <linux/bitops.h>
 
+#ifdef CONFIG_BCM4710
+#include "../bcm947xx/include/typedefs.h"
+#include "../bcm947xx/include/sbconfig.h"
+#include <asm/paccess.h>
+#endif
+
 #include <asm/bcache.h>
 #include <asm/bootinfo.h>
 #include <asm/cacheops.h>
@@ -40,6 +46,7 @@ static struct bcache_ops no_sc_ops = {
 	.bc_inv = (void *)no_sc_noop
 };
 
+int bcm4710 = 0;
 struct bcache_ops *bcops = &no_sc_ops;
 
 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x2010)
@@ -64,8 +71,10 @@ static inline void r4k_blast_dcache_page
 static inline void r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-
-	if (dc_lsize == 16)
+
+	if (bcm4710)
+		r4k_blast_dcache_page = blast_dcache_page;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page = blast_dcache16_page;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -77,7 +86,9 @@ static void r4k_blast_dcache_page_indexe
 {
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
-	if (dc_lsize == 16)
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -89,7 +100,9 @@ static inline void r4k_blast_dcache_setu
 {
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
-	if (dc_lsize == 16)
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache = blast_dcache16;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache = blast_dcache32;
@@ -266,6 +279,7 @@ static void r4k___flush_cache_all(void)
 	r4k_blast_dcache();
 	r4k_blast_icache();
 
+	if (!bcm4710)
 	switch (current_cpu_data.cputype) {
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -304,10 +318,10 @@ static void r4k_flush_cache_mm(struct mm
 	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
 	 * only flush the primary caches but R10000 and R12000 behave sane ...
 	 */
-	if (current_cpu_data.cputype == CPU_R4000SC ||
+	if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC ||
 	    current_cpu_data.cputype == CPU_R4000MC ||
 	    current_cpu_data.cputype == CPU_R4400SC ||
-	    current_cpu_data.cputype == CPU_R4400MC)
+	    current_cpu_data.cputype == CPU_R4400MC))
 		r4k_blast_scache();
 }
 
@@ -383,12 +397,15 @@ static void r4k_flush_icache_range(unsig
 	unsigned long ic_lsize = current_cpu_data.icache.linesz;
 	unsigned long addr, aend;
 
+	addr = start & ~(dc_lsize - 1);
+	aend = (end - 1) & ~(dc_lsize - 1);
+
 	if (!cpu_has_ic_fills_f_dc) {
 		if (end - start > dcache_size)
 			r4k_blast_dcache();
 		else {
-			addr = start & ~(dc_lsize - 1);
-			aend = (end - 1) & ~(dc_lsize - 1);
+			BCM4710_PROTECTED_FILL_TLB(addr);
+			BCM4710_PROTECTED_FILL_TLB(aend);
 
 			while (1) {
 				/* Hit_Writeback_Inv_D */
@@ -403,8 +420,6 @@ static void r4k_flush_icache_range(unsig
 	if (end - start > icache_size)
 		r4k_blast_icache();
 	else {
-		addr = start & ~(ic_lsize - 1);
-		aend = (end - 1) & ~(ic_lsize - 1);
 		while (1) {
 			/* Hit_Invalidate_I */
 			protected_flush_icache_line(addr);
@@ -413,6 +428,9 @@ static void r4k_flush_icache_range(unsig
 			addr += ic_lsize;
 		}
 	}
+
+	if (bcm4710)
+		flush_cache_all();
 }
 
 /*
@@ -443,7 +461,8 @@ static void r4k_flush_icache_page(struct
 	if (cpu_has_subset_pcaches) {
 		unsigned long addr = (unsigned long) page_address(page);
 
-		r4k_blast_scache_page(addr);
+		if (!bcm4710)
+			r4k_blast_scache_page(addr);
 		ClearPageDcacheDirty(page);
 
 		return;
@@ -451,6 +470,7 @@ static void r4k_flush_icache_page(struct
 
 	if (!cpu_has_ic_fills_f_dc) {
 		unsigned long addr = (unsigned long) page_address(page);
+
 		r4k_blast_dcache_page(addr);
 		ClearPageDcacheDirty(page);
 	}
@@ -477,7 +497,7 @@ static void r4k_dma_cache_wback_inv(unsi
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
-	if (cpu_has_subset_pcaches) {
+	if (!bcm4710 && cpu_has_subset_pcaches) {
 		unsigned long sc_lsize = current_cpu_data.scache.linesz;
 
 		if (size >= scache_size) {
@@ -509,6 +529,8 @@ static void r4k_dma_cache_wback_inv(unsi
 	R4600_HIT_CACHEOP_WAR_IMPL;
 	a = addr & ~(dc_lsize - 1);
 	end = (addr + size - 1) & ~(dc_lsize - 1);
+	BCM4710_FILL_TLB(a);
+	BCM4710_FILL_TLB(end);
 	while (1) {
 		flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
 		if (a == end)
@@ -527,7 +549,7 @@ static void r4k_dma_cache_inv(unsigned l
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
-	if (cpu_has_subset_pcaches) {
+	if (!bcm4710 && (cpu_has_subset_pcaches)) {
 		unsigned long sc_lsize = current_cpu_data.scache.linesz;
 
 		if (size >= scache_size) {
@@ -554,6 +576,8 @@ static void r4k_dma_cache_inv(unsigned l
 	R4600_HIT_CACHEOP_WAR_IMPL;
 	a = addr & ~(dc_lsize - 1);
 	end = (addr + size - 1) & ~(dc_lsize - 1);
+	BCM4710_FILL_TLB(a);
+	BCM4710_FILL_TLB(end);
 	while (1) {
 		flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
 		if (a == end)
@@ -577,6 +601,8 @@ static void r4k_flush_cache_sigtramp(uns
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
+	BCM4710_PROTECTED_FILL_TLB(addr);
+	BCM4710_PROTECTED_FILL_TLB(addr + 4);
 	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	protected_flush_icache_line(addr & ~(ic_lsize - 1));
 	if (MIPS4K_ICACHE_REFILL_WAR) {
@@ -986,10 +1012,12 @@ static void __init setup_scache(void)
 	case CPU_R4000MC:
 	case CPU_R4400SC:
 	case CPU_R4400MC:
-		probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
-		sc_present = probe_scache_kseg1(config);
-		if (sc_present)
-			c->options |= MIPS_CPU_CACHE_CDEX_S;
+		if (!bcm4710) {
+			probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
+			sc_present = probe_scache_kseg1(config);
+			if (sc_present)
+				c->options |= MIPS_CPU_CACHE_CDEX_S;
+		}
 		break;
 
 	case CPU_R10000:
@@ -1041,6 +1069,19 @@ static void __init setup_scache(void)
 static inline void coherency_setup(void)
 {
 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+
+#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
+	if (BCM330X(current_cpu_data.processor_id)) {
+		uint32 cm;
+
+		cm = read_c0_diag();
+		/* Enable icache */
+		cm |= (1 << 31);
+		/* Enable dcache */
+		cm |= (1 << 30);
+		write_c0_diag(cm);
+	}
+#endif
 
 	/*
 	 * c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1073,6 +1114,12 @@ void __init ld_mmu_r4xx0(void)
 	memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
 	memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);
 
+	if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+		bcm4710 = 0;
+
 	probe_pcache();
 	setup_scache();
 
--- a/arch/mips/mm/tlbex-mips32.S
+++ b/arch/mips/mm/tlbex-mips32.S
@@ -90,6 +90,9 @@
 	.set	noat
 	LEAF(except_vec0_r4000)
 	.set	mips3
+#ifdef CONFIG_BCM4704
+	nop
+#endif
 #ifdef CONFIG_SMP
 	mfc0	k1, CP0_CONTEXT
 	la	k0, pgd_current
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -15,6 +15,18 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 
+#ifdef CONFIG_BCM4710
+#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 #define cache_op(op,addr)						\
 	__asm__ __volatile__(						\
 	"	.set	noreorder				\n"	\
@@ -27,12 +39,25 @@
 
 static inline void flush_icache_line_indexed(unsigned long addr)
 {
-	cache_op(Index_Invalidate_I, addr);
+	unsigned int way;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+
+	for (way = 0; way < current_cpu_data.dcache.ways; way++) {
+		cache_op(Index_Invalidate_I, addr);
+		addr += ws_inc;
+	}
 }
 
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
-	cache_op(Index_Writeback_Inv_D, addr);
+	unsigned int way;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+
+	for (way = 0; way < current_cpu_data.dcache.ways; way++) {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, addr);
+		addr += ws_inc;
+	}
 }
 
 static inline void flush_scache_line_indexed(unsigned long addr)
@@ -47,6 +72,7 @@ static inline void flush_icache_line(uns
 
 static inline void flush_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 }
 
@@ -91,6 +117,7 @@ static inline void protected_flush_icach
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	__asm__ __volatile__(
 		".set noreorder\n\t"
 		".set mips3\n"
@@ -138,6 +165,62 @@ static inline void invalidate_tcache_pag
 		: "r" (base),						\
 		  "i" (op));
 
+#define cache_unroll(base,op)				\
+	__asm__ __volatile__("				\
+		.set noreorder;				\
+		.set mips3;				\
+		cache %1, (%0);				\
+		.set mips0;				\
+		.set reorder"				\
+		:					\
+		: "r" (base),				\
+		  "i" (op));
+
+
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	while(start < end) {
+		BCM4710_DUMMY_RREG();
+		cache_unroll(start,Index_Writeback_Inv_D);
+		start += current_cpu_data.dcache.linesz;
+	}
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_unroll(start,Hit_Writeback_Inv_D);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+	                       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_unroll(addr,Index_Writeback_Inv_D);
+		}
+	}
+}
+
 static inline void blast_dcache16(void)
 {
 	unsigned long start = KSEG0;
@@ -148,8 +231,9 @@ static inline void blast_dcache16(void)
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x200)
+		for (addr = start; addr < end; addr += 0x200) {
 			cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_dcache16_page(unsigned long page)
@@ -173,8 +257,9 @@ static inline void blast_dcache16_page_i
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x200)
+		for (addr = start; addr < end; addr += 0x200) {
 			cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_icache16(void)
@@ -196,6 +281,7 @@ static inline void blast_icache16_page(u
 	unsigned long start = page;
 	unsigned long end = start + PAGE_SIZE;
 
+	BCM4710_FILL_TLB(start);
 	do {
 		cache16_unroll32(start,Hit_Invalidate_I);
 		start += 0x200;
@@ -281,6 +367,7 @@ static inline void blast_scache16_page_i
 		: "r" (base),						\
 		  "i" (op));
 
+
 static inline void blast_dcache32(void)
 {
 	unsigned long start = KSEG0;
@@ -291,8 +378,9 @@ static inline void blast_dcache32(void)
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x400)
+		for (addr = start; addr < end; addr += 0x400) {
 			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_dcache32_page(unsigned long page)
@@ -316,8 +404,9 @@ static inline void blast_dcache32_page_i
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x400)
+		for (addr = start; addr < end; addr += 0x400) {
 			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_icache32(void)
@@ -339,6 +428,7 @@ static inline void blast_icache32_page(u
 	unsigned long start = page;
 	unsigned long end = start + PAGE_SIZE;
 
+	BCM4710_FILL_TLB(start);
 	do {
 		cache32_unroll32(start,Hit_Invalidate_I);
 		start += 0x400;
@@ -443,6 +533,7 @@ static inline void blast_icache64_page(u
 	unsigned long start = page;
 	unsigned long end = start + PAGE_SIZE;
 
+	BCM4710_FILL_TLB(start);
 	do {
 		cache64_unroll32(start,Hit_Invalidate_I);
 		start += 0x800;
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -209,6 +209,20 @@
 
 #endif
 
+#if defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
+
+#undef RESTORE_SP_AND_RET
+#define RESTORE_SP_AND_RET						\
+		lw	sp, PT_R29(sp);					\
+		.set	mips3;						\
+		nop;							\
+		nop;							\
+		eret;							\
+		.set	mips0
+
+#endif
+
+
 #define RESTORE_SP							\
 		lw	sp, PT_R29(sp);					\
 
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -927,6 +927,7 @@ static inline void break_cow(struct vm_a
 	flush_page_to_ram(new_page);
 	flush_cache_page(vma, address);
 	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+	flush_icache_page(vma, new_page);
 }
 
 /*
