Source at commit ba1031a7e7eac2aea92f07960238404b1622462f, by Lars-Peter Clausen.

From 42789dfb077bb7b640ee19d0e3f7808dc5318adf Mon Sep 17 00:00:00 2001
Subject: [PATCH] /opt/Projects/openwrt/target/linux/xburst/patches-2.6.31/001-core.patch
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>

#ifdef CONFIG_JZRISC

#define K0_TO_K1() \
do { \
	unsigned long __k0_addr; \
\
	__asm__ __volatile__( \
	"la %0, 1f\n\t" \
	"or %0, %0, %1\n\t" \
	"jr %0\n\t" \
	"nop\n\t" \
	"1: nop\n" \
	: "=&r" (__k0_addr) \
	: "r" (0x20000000)); \
} while (0)

#define K1_TO_K0() \
do { \
	unsigned long __k0_addr; \
	__asm__ __volatile__( \
	"nop;nop;nop;nop;nop;nop;nop\n\t" \
	"la %0, 1f\n\t" \
	"jr %0\n\t" \
	"nop\n\t" \
	"1: nop\n" \
	: "=&r" (__k0_addr)); \
} while (0)
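
/*
 * Editor's note (commentary added to this listing): both macros work by
 * jumping to a relocated copy of the local label 1f.  K0_TO_K1() ORs the
 * label's address with 0x20000000, the offset from cached KSEG0
 * (0x80000000) to uncached KSEG1 (0xa0000000), so execution continues
 * uncached; K1_TO_K0() jumps back to the label's link-time KSEG0 address,
 * with the leading nops presumably letting earlier instructions drain
 * before the segment switch.
 */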

#define INVALIDATE_BTB() \
do { \
	unsigned long tmp; \
	__asm__ __volatile__( \
	".set mips32\n\t" \
	"mfc0 %0, $16, 7\n\t" \
	"nop\n\t" \
	"ori %0, 2\n\t" \
	"mtc0 %0, $16, 7\n\t" \
	"nop\n\t" \
	: "=&r" (tmp)); \
} while (0)
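
/*
 * Editor's note (commentary added to this listing): this is a
 * read-modify-write of CP0 register 16, select 7 - a vendor-specific
 * Config7 register on XBurst/JZ47xx cores.  Setting bit 1 evidently
 * requests invalidation of the branch target buffer, as the macro name
 * suggests; the surrounding nops guard against mtc0 hazards.
 */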

#define SYNC_WB() __asm__ __volatile__ ("sync")

#else /* CONFIG_JZRISC */

#define K0_TO_K1() do { } while (0)
#define K1_TO_K0() do { } while (0)
#define INVALIDATE_BTB() do { } while (0)
#define SYNC_WB() do { } while (0)

#endif /* CONFIG_JZRISC */

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE CKSEG0
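
/*
 * Editor's note (commentary added to this listing): CKSEG0 is 0x80000000
 * on 32-bit kernels and the sign-extended 0xffffffff80000000 on 64-bit
 * ones, so "INDEX_BASE + (way << waybit) + set_offset" always forms a
 * legal operand for the indexed cache ops used below.
 */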

#define cache_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3\n\t \n" \
	" cache %0, %1 \n" \
	" .set pop \n" \
	: \
	: "i" (op), "R" (*(unsigned char *)(addr)))

#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protiflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_IPROT \
	if (mt_protiflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protdflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_DPROT \
	if (mt_protdflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue \
	unsigned long redundance; \
	extern int mt_n_iflushes; \
	BEGIN_MT_IPROT \
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
	END_MT_IPROT \
	}

#define __dflush_prologue \
	unsigned long redundance; \
	extern int mt_n_dflushes; \
	BEGIN_MT_DPROT \
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
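
/*
 * Editor's note (commentary added to this listing): each *_prologue opens
 * a brace that the matching *_epilogue closes.  In the plain configuration
 * that is just an empty block around the flush body; under CONFIG_MIPS_MT
 * the brace belongs to a for loop, so the body is repeated
 * mt_n_iflushes/mt_n_dflushes times, optionally single-threaded via the
 * BEGIN/END_MT_*PROT brackets.
 */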

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	INVALIDATE_BTB();
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	SYNC_WB();
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	INVALIDATE_BTB();
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	SYNC_WB();
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	SYNC_WB();
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	"1: cache %0, (%1) \n" \
	"2: .set pop \n" \
	" .section __ex_table,\"a\" \n" \
	" "STR(PTR)" 1b, 2b \n" \
	" .previous" \
	: \
	: "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
	INVALIDATE_BTB();
}

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop so we use Hit_Writeback_Inv_D which is supported
 * by all R4000-style caches. We're talking about one cacheline
 * unnecessarily getting invalidated here so the penalty isn't overly hard.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
	SYNC_WB();
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
	" cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
	" cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
	" cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
	" cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
	" cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
	" cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
	" cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
	" cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
	" cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
	" cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
	" cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
	" cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
	" cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
	" cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
	" cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
	" cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
	" cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
	" cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
	" cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
	" cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
	" cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
	" cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
	" cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
	" cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
	" cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
	" cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
	" cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
	" cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
	" cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
	" cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
	" cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
	" cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
	" cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
	" cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
	" cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
	" cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
	" cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
	" cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
	" cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
	" cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
	" cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
	" cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
	" cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
	" cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
	" cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
	" cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
	" cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
	" cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
	" cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
	" cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
	" cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
	" cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
	" cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void) \
{ \
	unsigned long start = INDEX_BASE; \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
\
	__##pfx##flush_prologue \
\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
\
	__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
\
	__##pfx##flush_prologue \
\
	do { \
		cache##lsize##_unroll32(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
\
	__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
	unsigned long start = INDEX_BASE + (page & indexmask); \
	unsigned long end = start + PAGE_SIZE; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
\
	__##pfx##flush_prologue \
\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
\
	__##pfx##flush_epilogue \
}
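
/*
 * Editor's note (commentary added to this listing): for example the
 * instantiation __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D,
 * Hit_Writeback_Inv_D, 32) below generates blast_dcache32(), which walks
 * every way/index of the whole d-cache with indexed ops,
 * blast_dcache32_page(), which walks one page with hit ops, and
 * blast_dcache32_page_indexed(), which walks a page's worth of indexes in
 * every way.
 */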

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
#endif
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
#endif
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)

#ifdef CONFIG_JZRISC

static inline void blast_dcache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.dcache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
			       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Writeback_Inv_D);

	SYNC_WB();
}

static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	do {
		cache32_unroll32(start, Hit_Writeback_Inv_D);
		start += 0x400;
	} while (start < end);

	SYNC_WB();
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.dcache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
			       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Writeback_Inv_D);

	SYNC_WB();
}

static inline void blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	K0_TO_K1();

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Invalidate_I);

	INVALIDATE_BTB();

	K1_TO_K0();
}

static inline void blast_icache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	K0_TO_K1();

	do {
		cache32_unroll32(start, Hit_Invalidate_I);
		start += 0x400;
	} while (start < end);

	INVALIDATE_BTB();

	K1_TO_K0();
}

static inline void blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	K0_TO_K1();

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Invalidate_I);

	INVALIDATE_BTB();

	K1_TO_K0();
}

#endif /* CONFIG_JZRISC */
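
/*
 * Editor's note (commentary added to this listing): these hand-written
 * functions replace the 32-byte-line versions excluded by the #ifndef
 * CONFIG_JZRISC guards above.  They differ from the generated ones only in
 * the XBurst fixups: SYNC_WB() after d-cache writebacks and the
 * K0_TO_K1()/INVALIDATE_BTB()/K1_TO_K0() bracket around i-cache work.
 */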

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
\
	__##pfx##flush_prologue \
\
	while (1) { \
		prot##cache_op(hitop, addr); \
		if (addr == aend) \
			break; \
		addr += lsize; \
	} \
\
	__##pfx##flush_epilogue \
}
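
/*
 * Editor's note (commentary added to this listing): start is rounded down
 * to a line boundary and aend is the line containing the last byte
 * (end - 1), so every line overlapped by [start, end) is touched and the
 * "addr == aend" test terminates correctly even when end is line-aligned.
 */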

#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )

#ifdef CONFIG_JZRISC

static inline void protected_blast_dcache_range(unsigned long start,
						unsigned long end)
{
	unsigned long lsize = cpu_dcache_line_size();
	unsigned long addr = start & ~(lsize - 1);
	unsigned long aend = (end - 1) & ~(lsize - 1);

	while (1) {
		protected_cache_op(Hit_Writeback_Inv_D, addr);
		if (addr == aend)
			break;
		addr += lsize;
	}
	SYNC_WB();
}

static inline void protected_blast_icache_range(unsigned long start,
						unsigned long end)
{
	unsigned long lsize = cpu_icache_line_size();
	unsigned long addr = start & ~(lsize - 1);
	unsigned long aend = (end - 1) & ~(lsize - 1);

	K0_TO_K1();

	while (1) {
		protected_cache_op(Hit_Invalidate_I, addr);
		if (addr == aend)
			break;
		addr += lsize;
	}
	INVALIDATE_BTB();

	K1_TO_K0();
}

static inline void blast_dcache_range(unsigned long start,
				      unsigned long end)
{
	unsigned long lsize = cpu_dcache_line_size();
	unsigned long addr = start & ~(lsize - 1);
	unsigned long aend = (end - 1) & ~(lsize - 1);

	while (1) {
		cache_op(Hit_Writeback_Inv_D, addr);
		if (addr == aend)
			break;
		addr += lsize;
	}
	SYNC_WB();
}

#endif /* CONFIG_JZRISC */
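
/*
 * Editor's note (commentary added to this listing): a typical caller-side
 * sketch, with buf and len hypothetical, for pushing a buffer out to
 * memory before a device reads it via DMA:
 *
 *	blast_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */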

#endif /* _ASM_R4KCACHE_H */