Root/target/linux/xburst/patches-2.6.36/001-xburst-cache-quirks.patch

From 3679ae9872aef12529b332767e32097aa8233904 Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen <lars@metafoo.de>
Date: Sat, 24 Apr 2010 17:34:29 +0200
Subject: [PATCH] JZ4740 cache quirks

---
 arch/mips/include/asm/r4kcache.h | 231 ++++++++++++++++++++++++++++++++++++++
 1 files changed, 231 insertions(+), 0 deletions(-)

--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,58 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsmtregs.h>
 
+#ifdef CONFIG_JZRISC
+
+#define K0_TO_K1() \
+do { \
+	unsigned long __k0_addr; \
+ \
+	__asm__ __volatile__( \
+	"la %0, 1f\n\t" \
+	"or %0, %0, %1\n\t" \
+	"jr %0\n\t" \
+	"nop\n\t" \
+	"1: nop\n" \
+	: "=&r"(__k0_addr) \
+	: "r" (0x20000000) ); \
+} while(0)
+
+#define K1_TO_K0() \
+do { \
+	unsigned long __k0_addr; \
+	__asm__ __volatile__( \
+	"nop;nop;nop;nop;nop;nop;nop\n\t" \
+	"la %0, 1f\n\t" \
+	"jr %0\n\t" \
+	"nop\n\t" \
+	"1: nop\n" \
+	: "=&r" (__k0_addr)); \
+} while (0)
+
+#define INVALIDATE_BTB() \
+do { \
+	unsigned long tmp; \
+	__asm__ __volatile__( \
+	".set mips32\n\t" \
+	"mfc0 %0, $16, 7\n\t" \
+	"nop\n\t" \
+	"ori %0, 2\n\t" \
+	"mtc0 %0, $16, 7\n\t" \
+	"nop\n\t" \
+	: "=&r" (tmp)); \
+} while (0)
+
+#define SYNC_WB() __asm__ __volatile__ ("sync")
+
+#else /* CONFIG_JZRISC */
+
+#define K0_TO_K1() do { } while (0)
+#define K1_TO_K0() do { } while (0)
+#define INVALIDATE_BTB() do { } while (0)
+#define SYNC_WB() do { } while (0)
+
+#endif /* CONFIG_JZRISC */
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations. Two issues here:
@@ -144,6 +196,7 @@ static inline void flush_icache_line_ind
 {
 	__iflush_prologue
 	cache_op(Index_Invalidate_I, addr);
+	INVALIDATE_BTB();
 	__iflush_epilogue
 }
 
@@ -151,6 +204,7 @@ static inline void flush_dcache_line_ind
 {
 	__dflush_prologue
 	cache_op(Index_Writeback_Inv_D, addr);
+	SYNC_WB();
 	__dflush_epilogue
 }
 
@@ -163,6 +217,7 @@ static inline void flush_icache_line(uns
 {
 	__iflush_prologue
 	cache_op(Hit_Invalidate_I, addr);
+	INVALIDATE_BTB();
 	__iflush_epilogue
 }
 
@@ -170,6 +225,7 @@ static inline void flush_dcache_line(uns
 {
 	__dflush_prologue
 	cache_op(Hit_Writeback_Inv_D, addr);
+	SYNC_WB();
 	__dflush_epilogue
 }
 
@@ -177,6 +233,7 @@ static inline void invalidate_dcache_lin
 {
 	__dflush_prologue
 	cache_op(Hit_Invalidate_D, addr);
+	SYNC_WB();
 	__dflush_epilogue
 }
 
@@ -209,6 +266,7 @@ static inline void flush_scache_line(uns
 static inline void protected_flush_icache_line(unsigned long addr)
 {
 	protected_cache_op(Hit_Invalidate_I, addr);
+	INVALIDATE_BTB();
 }
 
 /*
@@ -220,6 +278,7 @@ static inline void protected_flush_icach
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
 	protected_cache_op(Hit_Writeback_Inv_D, addr);
+	SYNC_WB();
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
@@ -396,8 +455,10 @@ static inline void blast_##pfx##cache##l
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+#endif
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
@@ -405,12 +466,122 @@ __BUILD_BLAST_CACHE(s, scache, Index_Wri
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
 
 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
+#endif
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
+#ifdef CONFIG_JZRISC
+
+static inline void blast_dcache32(void)
+{
+	unsigned long start = INDEX_BASE;
+	unsigned long end = start + current_cpu_data.dcache.waysize;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+			       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+
+	for (ws = 0; ws < ws_end; ws += ws_inc)
+		for (addr = start; addr < end; addr += 0x400)
+			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+	SYNC_WB();
+}
+
+static inline void blast_dcache32_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = page + PAGE_SIZE;
+
+	do {
+		cache32_unroll32(start,Hit_Writeback_Inv_D);
+		start += 0x400;
+	} while (start < end);
+
+	SYNC_WB();
+}
+
+static inline void blast_dcache32_page_indexed(unsigned long page)
+{
+	unsigned long indexmask = current_cpu_data.dcache.waysize - 1;
+	unsigned long start = INDEX_BASE + (page & indexmask);
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+			       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+
+	for (ws = 0; ws < ws_end; ws += ws_inc)
+		for (addr = start; addr < end; addr += 0x400)
+			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+	SYNC_WB();
+}
+
+static inline void blast_icache32(void)
+{
+	unsigned long start = INDEX_BASE;
+	unsigned long end = start + current_cpu_data.icache.waysize;
+	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+	unsigned long ws_end = current_cpu_data.icache.ways <<
+			       current_cpu_data.icache.waybit;
+	unsigned long ws, addr;
+
+	K0_TO_K1();
+
+	for (ws = 0; ws < ws_end; ws += ws_inc)
+		for (addr = start; addr < end; addr += 0x400)
+			cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+	INVALIDATE_BTB();
+
+	K1_TO_K0();
+}
+
+static inline void blast_icache32_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = page + PAGE_SIZE;
+
+	K0_TO_K1();
+
+	do {
+		cache32_unroll32(start,Hit_Invalidate_I);
+		start += 0x400;
+	} while (start < end);
+
+	INVALIDATE_BTB();
+
+	K1_TO_K0();
+}
+
+static inline void blast_icache32_page_indexed(unsigned long page)
+{
+	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+	unsigned long start = INDEX_BASE + (page & indexmask);
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+	unsigned long ws_end = current_cpu_data.icache.ways <<
+			       current_cpu_data.icache.waybit;
+	unsigned long ws, addr;
+
+	K0_TO_K1();
+
+	for (ws = 0; ws < ws_end; ws += ws_inc)
+		for (addr = start; addr < end; addr += 0x400)
+			cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+	INVALIDATE_BTB();
+
+	K1_TO_K0();
+}
+
+#endif /* CONFIG_JZRISC */
+
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
 static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
@@ -432,13 +603,73 @@ static inline void prot##blast_##pfx##ca
 	__##pfx##flush_epilogue \
 }
 
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
+#endif
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+#endif
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
 /* blast_inv_dcache_range */
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
 
+#ifdef CONFIG_JZRISC
+
+static inline void protected_blast_dcache_range(unsigned long start,
+						unsigned long end)
+{
+	unsigned long lsize = cpu_dcache_line_size();
+	unsigned long addr = start & ~(lsize - 1);
+	unsigned long aend = (end - 1) & ~(lsize - 1);
+
+	while (1) {
+		protected_cache_op(Hit_Writeback_Inv_D, addr);
+		if (addr == aend)
+			break;
+		addr += lsize;
+	}
+	SYNC_WB();
+}
+
+static inline void protected_blast_icache_range(unsigned long start,
+						unsigned long end)
+{
+	unsigned long lsize = cpu_icache_line_size();
+	unsigned long addr = start & ~(lsize - 1);
+	unsigned long aend = (end - 1) & ~(lsize - 1);
+
+	K0_TO_K1();
+
+	while (1) {
+		protected_cache_op(Hit_Invalidate_I, addr);
+		if (addr == aend)
+			break;
+		addr += lsize;
+	}
+	INVALIDATE_BTB();
+
+	K1_TO_K0();
+}
+
+static inline void blast_dcache_range(unsigned long start,
+				      unsigned long end)
+{
+	unsigned long lsize = cpu_dcache_line_size();
+	unsigned long addr = start & ~(lsize - 1);
+	unsigned long aend = (end - 1) & ~(lsize - 1);
+
+	while (1) {
+		cache_op(Hit_Writeback_Inv_D, addr);
+		if (addr == aend)
+			break;
+		addr += lsize;
+	}
+	SYNC_WB();
+}
+
+#endif /* CONFIG_JZRISC */
+
 #endif /* _ASM_R4KCACHE_H */
