Root / target/linux/xburst/patches-2.6.31/001-core.patch

Source at commit ba1031a7e7eac2aea92f07960238404b1622462f (created 14 years 2 months ago) by Lars-Peter Clausen.
From 42789dfb077bb7b640ee19d0e3f7808dc5318adf Mon Sep 17 00:00:00 2001
Subject: [PATCH] /opt/Projects/openwrt/target/linux/xburst/patches-2.6.31/001-core.patch
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>

#include "uasm.h"

static inline int r45k_bvahwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int r4k_250MHZhwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
        return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
        return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register. Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values. Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
        return (current_cpu_data.processor_id & 0xffff00) ==
               (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
        label_second_part = 1,
        label_leave,
#ifdef MODULE_START
        label_module_alloc,
#endif
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard,
        label_split,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
#ifdef CONFIG_HUGETLB_PAGE
        label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
#ifdef MODULE_START
UASM_L_LA(_module_alloc)
#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
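
/*
 * Sketch of the label/relocation mechanism (inferred from the uasm.h
 * helpers used here; not part of the original patch): UASM_L_LA(_leave)
 * generates uasm_l_leave(l, addr), which records "label_leave lives at
 * addr". A branch emitted with uasm_il_b(p, r, label_leave) records a
 * relocation against that label, and uasm_resolve_relocs() later
 * patches the branch offset once both sides are known.
 */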

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
        int i;

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");

        for (i = 0; i < count; i++)
                pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

        pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0              26
#define K1              27

/* Some CP0 registers */
#define C0_INDEX        0, 0
#define C0_ENTRYLO0     2, 0
#define C0_TCBIND       2, 2
#define C0_ENTRYLO1     3, 0
#define C0_CONTEXT      4, 0
#define C0_PAGEMASK     5, 0
#define C0_BADVADDR     8, 0
#define C0_ENTRYHI      10, 0
#define C0_EPC          14, 0
#define C0_XCONTEXT     20, 0

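/*
 * Illustrative note (not in the original patch): each C0_* macro expands
 * to a "register, select" pair for the uasm coprocessor-0 helpers, so
 * uasm_i_mfc0(&p, K0, C0_BADVADDR) assembles to "mfc0 k0, $8, 0", i.e. a
 * read of CP0 register 8, select 0.
 */
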
#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
        long pgdc = (long)pgd_current;
        u32 *p;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        p = tlb_handler;

        uasm_i_mfc0(&p, K0, C0_BADVADDR);
        uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
        uasm_i_srl(&p, K0, K0, 22); /* load delay */
        uasm_i_sll(&p, K0, K0, 2);
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_mfc0(&p, K0, C0_CONTEXT);
        uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
        uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_lw(&p, K0, 0, K1);
        uasm_i_nop(&p); /* load delay */
        uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
        uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
        uasm_i_tlbwr(&p); /* cp0 delay */
        uasm_i_jr(&p, K1);
        uasm_i_rfe(&p); /* branch delay */

        if (p > tlb_handler + 32)
                panic("TLB refill handler space exceeded");

        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 (unsigned int)(p - tlb_handler));

        memcpy((void *)ebase, tlb_handler, 0x80);

        dump_handler((u32 *)ebase, 32);
}
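
/*
 * For reference (editorial sketch, not part of the original patch), the
 * uasm calls above map one-to-one onto this MIPS I sequence:
 *
 *      mfc0    k0, c0_badvaddr         # faulting address
 *      lui     k1, %hi(pgd_current)
 *      lw      k1, %lo(pgd_current)(k1)
 *      srl     k0, k0, 22              # pgd index
 *      sll     k0, k0, 2
 *      addu    k1, k1, k0
 *      mfc0    k0, c0_context          # pre-scaled pte index
 *      lw      k1, 0(k1)               # pgd entry
 *      andi    k0, k0, 0xffc
 *      addu    k1, k1, k0
 *      lw      k0, 0(k1)               # pte
 *      nop
 *      mtc0    k0, c0_entrylo0
 *      mfc0    k1, c0_epc
 *      tlbwr
 *      jr      k1
 *      rfe                             # branch delay
 */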

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instruction slots each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software workaround is to not allow the instruction preceding the TLBP
 * to stall - make it a NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
        switch (current_cpu_type()) {
        /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_R5000A:
        case CPU_NEVADA:
                uasm_i_nop(p);
                uasm_i_tlbp(p);
                break;

        default:
                uasm_i_tlbp(p);
                break;
        }
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                                            struct uasm_reloc **r,
                                            enum tlb_write_entry wmode)
{
        void (*tlbw)(u32 **) = NULL;

        switch (wmode) {
        case tlb_random: tlbw = uasm_i_tlbwr; break;
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }

        if (cpu_has_mips_r2) {
                if (cpu_has_mips_r2_exec_hazard)
                        uasm_i_ehb(p);
                tlbw(p);
                return;
        }

        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * two nops after the tlbw instruction.
                 */
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                uasm_l_tlbw_hazard(l, *p);
                uasm_i_nop(p);
                break;

        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_R5000A:
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                break;

        case CPU_R4300:
        case CPU_5KC:
        case CPU_TX49XX:
        case CPU_PR4450:
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_4KC:
        case CPU_4KEC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
        case CPU_20KC:
        case CPU_25KF:
        case CPU_BCM3302:
        case CPU_BCM4710:
        case CPU_LOONGSON2:
        case CPU_BCM6338:
        case CPU_BCM6345:
        case CPU_BCM6348:
        case CPU_BCM6358:
        case CPU_R5500:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
                /* fall through */
        case CPU_ALCHEMY:
                tlbw(p);
                break;

        case CPU_NEVADA:
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * a nop after the tlbw instruction.
                 */
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                uasm_l_tlbw_hazard(l, *p);
                break;

        case CPU_RM7000:
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_RM9000:
                /*
                 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
                 * use of the JTLB for instructions should not occur for 4
                 * cpu cycles and use for data translations should not occur
                 * for 3 cpu cycles.
                 */
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                tlbw(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                break;

        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                break;

        case CPU_VR4131:
        case CPU_VR4133:
        case CPU_R5432:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_JZRISC:
                tlbw(p);
                uasm_i_nop(p);
                break;

        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_data.cputype);
                break;
        }
}

#ifdef CONFIG_HUGETLB_PAGE
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
                                                 struct uasm_label **l,
                                                 struct uasm_reloc **r,
                                                 unsigned int tmp,
                                                 enum tlb_write_entry wmode)
{
        /* Set huge page tlb entry size */
        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
        uasm_i_mtc0(p, tmp, C0_PAGEMASK);

        build_tlb_write_entry(p, l, r, wmode);

        /* Reset default page size */
        if (PM_DEFAULT_MASK >> 16) {
                uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
                uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
                uasm_il_b(p, r, label_leave);
                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
        } else if (PM_DEFAULT_MASK) {
                uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
                uasm_il_b(p, r, label_leave);
                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
        } else {
                uasm_il_b(p, r, label_leave);
                uasm_i_mtc0(p, 0, C0_PAGEMASK);
        }
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
                  unsigned int pmd, int lid)
{
        UASM_i_LW(p, tmp, 0, pmd);
        uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
        uasm_il_bnez(p, r, tmp, lid);
}

static __cpuinit void build_huge_update_entries(u32 **p,
                                                unsigned int pte,
                                                unsigned int tmp)
{
        int small_sequence;

        /*
         * A huge PTE describes an area the size of the
         * configured huge page size. This is twice the size
         * of the large TLB entry we intend to use.
         * A TLB entry half the size of the configured
         * huge page size is configured into entrylo0
         * and entrylo1 to cover the contiguous huge PTE
         * address space.
         */
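        /*
         * Worked example (editorial, assuming a 2 MB huge page): with
         * HPAGE_SIZE == 0x200000, HPAGE_SIZE >> 7 == 0x4000 fits in 16
         * bits, so small_sequence (computed just below) is true and
         * entrylo1 can be derived from entrylo0 with a single addiu
         * instead of a lui/addu pair.
         */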
        small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

        /* We can clobber tmp. It isn't used after this. */
        if (!small_sequence)
                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

        UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
        /* convert to entrylo1 */
        if (small_sequence)
                UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
        else
                UASM_i_ADDU(p, pte, pte, tmp);

        uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
                                              struct uasm_reloc **r,
                                              struct uasm_label **l,
                                              unsigned int pte,
                                              unsigned int ptr)
{
#ifdef CONFIG_SMP
        UASM_i_SC(p, pte, 0, ptr);
        uasm_il_beqz(p, r, pte, label_tlb_huge_update);
        UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
        UASM_i_SW(p, pte, 0, ptr);
#endif
        build_huge_update_entries(p, pte, ptr);
        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /*
         * The vmalloc handling is not in the hotpath.
         */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
        uasm_il_bltz(p, r, tmp, label_vmalloc);
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
        uasm_i_dsrl(p, ptr, ptr, 19);
# else
        /*
         * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
         * stored in CONTEXT.
         */
        uasm_i_dmfc0(p, ptr, C0_CONTEXT);
        uasm_i_dsrl(p, ptr, ptr, 23);
# endif
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_daddu(p, ptr, ptr, tmp);
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
        uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
        UASM_i_LA_mostly(p, ptr, pgdc);
        uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

        uasm_l_vmalloc_done(l, *p);

        if (PGDIR_SHIFT - 3 < 32)               /* get pgd offset in bytes */
                uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
        else
                uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
        uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                        unsigned int bvaddr, unsigned int ptr)
{
        long swpd = (long)swapper_pg_dir;

        uasm_l_vmalloc(l, *p);

        if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
                uasm_il_b(p, r, label_vmalloc_done);
                uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
        } else {
                UASM_i_LA_mostly(p, ptr, swpd);
                uasm_il_b(p, r, label_vmalloc_done);
                if (uasm_in_compat_space_p(swpd))
                        uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
                else
                        uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
        }
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 19);
#else
        /*
         * smp_processor_id() << 3 is stored in CONTEXT.
         */
        uasm_i_mfc0(p, ptr, C0_CONTEXT);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 23);
#endif
        uasm_i_addu(p, ptr, tmp, ptr);
#else
        UASM_i_LA_mostly(p, ptr, pgdc);
#endif
        uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

        switch (current_cpu_type()) {
        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4131:
        case CPU_VR4181:
        case CPU_VR4181A:
        case CPU_VR4133:
                shift += 2;
                break;

        default:
                break;
        }

        if (shift)
                UASM_i_SRL(p, ctx, ctx, shift);
        uasm_i_andi(p, ctx, ctx, mask);
}
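
/*
 * Worked example (editorial): with 4 kB pages and 32-bit PTEs
 * (PAGE_SHIFT == 12, PTE_T_LOG2 == 2, PTRS_PER_PTE == 1024) the above
 * computes shift == 1 and mask == 0xff8, scaling the BadVPN2 field of
 * c0_context into the byte offset of an even/odd PTE pair.
 */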

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
        /*
         * Bug workaround for the Nevada. It seems as if under certain
         * circumstances the move from cp0_context might produce a
         * bogus result when the mfc0 instruction and its consumer are
         * in different cachelines, or when a load instruction (probably
         * any memory reference) sits between them.
         */
        switch (current_cpu_type()) {
        case CPU_NEVADA:
                UASM_i_LW(p, ptr, 0, ptr);
                GET_CONTEXT(p, tmp); /* get context reg */
                break;

        default:
                GET_CONTEXT(p, tmp); /* get context reg */
                UASM_i_LW(p, ptr, 0, ptr);
                break;
        }

        build_adjust_context(p, tmp);
        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                                           unsigned int ptep)
{
        /*
         * 64-bit address support (36-bit on a 32-bit CPU) in a 32-bit
         * kernel is a special case. Only a few CPUs use it.
         */
#ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits) {
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
                uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
                uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);

                /* The pte entries are pre-shifted */
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
                uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
                uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
        }
#else
        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
        UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
        if (r4k_250MHZhwbug())
                uasm_i_mtc0(p, 0, C0_ENTRYLO0);
        uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
        UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
        if (r45k_bvahwbug())
                uasm_i_mfc0(p, tmp, C0_INDEX);
        if (r4k_250MHZhwbug())
                uasm_i_mtc0(p, 0, C0_ENTRYLO1);
        uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0. If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

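/*
 * Layout sketch (editorial, inferred from the code below): final_handler[]
 * mirrors the two 32-word exception slots at ebase - words 0-31 are the
 * ordinarily unused 32-bit TLB refill slot, words 32-63 the XTLB refill
 * slot. That is why the 64-bit path starts emitting at
 * final_handler + MIPS64_REFILL_INSNS and may wrap an overflowing handler
 * backwards into the first slot.
 */
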
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
        u32 *p = tlb_handler;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *f;
        unsigned int final_len;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
        memset(final_handler, 0, sizeof(final_handler));

        /*
         * create the plain linear handler
         */
        if (bcm1250_m3_war()) {
                UASM_i_MFC0(&p, K0, C0_BADVADDR);
                UASM_i_MFC0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
                UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }

#ifdef CONFIG_64BIT
        build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
        build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
        build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

        build_get_ptep(&p, K0, K1);
        build_update_entries(&p, K0, K1);
        build_tlb_write_entry(&p, &l, &r, tlb_random);
        uasm_l_leave(&l, p);
        uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
        uasm_l_tlb_huge_update(&l, p);
        UASM_i_LW(&p, K0, 0, K1);
        build_huge_update_entries(&p, K0, K1);
        build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

        /*
         * Overflow check: For the 64-bit handler, we need at least one
         * free instruction slot for the wrap-around branch. In the worst
         * case, if the intended insertion point is a delay slot, we
         * need three, with the second nop'ed and the third being
         * unused.
         */
        /* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        if ((p - tlb_handler) > 64)
                panic("TLB refill handler space exceeded");
#else
        if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
            || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
                && uasm_insn_has_bdelay(relocs,
                                        tlb_handler + MIPS64_REFILL_INSNS - 3)))
                panic("TLB refill handler space exceeded");
#endif

        /*
         * Now fold the handler in the TLB refill handler space.
         */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        f = final_handler;
        /* Simplest case, just copy the handler. */
        uasm_copy_handler(relocs, labels, tlb_handler, p, f);
        final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
        f = final_handler + MIPS64_REFILL_INSNS;
        if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
                /* Just copy the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
#if defined(CONFIG_HUGETLB_PAGE)
                const enum label_id ls = label_tlb_huge_update;
#elif defined(MODULE_START)
                const enum label_id ls = label_module_alloc;
#else
                const enum label_id ls = label_vmalloc;
#endif
                u32 *split;
                int ov = 0;
                int i;

                for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
                        ;
                BUG_ON(i == ARRAY_SIZE(labels));
                split = labels[i].addr;

                /*
                 * See if we have overflowed one way or the other.
                 */
                if (split > tlb_handler + MIPS64_REFILL_INSNS ||
                    split < p - MIPS64_REFILL_INSNS)
                        ov = 1;

                if (ov) {
                        /*
                         * Split two instructions before the end. One
                         * for the branch and one for the instruction
                         * in the delay slot.
                         */
                        split = tlb_handler + MIPS64_REFILL_INSNS - 2;

                        /*
                         * If the branch would fall in a delay slot,
                         * we must back up an additional instruction
                         * so that it is no longer in a delay slot.
                         */
                        if (uasm_insn_has_bdelay(relocs, split - 1))
                                split--;
                }
                /* Copy first part of the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, split, f);
                f += split - tlb_handler;

                if (ov) {
                        /* Insert branch. */
                        uasm_l_split(&l, final_handler);
                        uasm_il_b(&f, &r, label_split);
                        if (uasm_insn_has_bdelay(relocs, split))
                                uasm_i_nop(&f);
                        else {
                                uasm_copy_handler(relocs, labels,
                                                  split, split + 1, f);
                                uasm_move_labels(labels, f, f + 1, -1);
                                f++;
                                split++;
                        }
                }

                /* Copy the rest of the handler. */
                uasm_copy_handler(relocs, labels, split, p, final_handler);
                final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
                            (p - split);
        }
#endif /* CONFIG_64BIT */

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 final_len);

        memcpy((void *)ebase, final_handler, 0x100);

        dump_handler((u32 *)ebase, 64);
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_lld(p, pte, 0, ptr);
        else
# endif
                UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_ld(p, pte, 0, ptr);
        else
# endif
                UASM_i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
        unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

        uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_scd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SC(p, pte, 0, ptr);

        if (r10000_llsc_war())
                uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
        else
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                /* no uasm_i_nop needed */
                uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
                /* no uasm_i_nop needed */
                uasm_i_lw(p, pte, 0, ptr);
        } else
                uasm_i_nop(p);
# else
        uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_sd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_lw(p, pte, 0, ptr);
        }
# endif
#endif
}
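
/*
 * Editorial note on the SMP path above: the mode bits are or'ed in
 * between the LL (emitted by iPTE_LW) and the SC, and a failed SC
 * branches back to label_smp_pgtable_change, so the PTE update retries
 * atomically against concurrent page table changes.
 */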

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
                  unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
        uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
        uasm_il_bnez(p, r, pte, lid);
        iPTE_LW(p, pte, ptr);
}
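
/*
 * Illustration (editorial): the andi isolates the two flag bits and the
 * xori inverts them, so the result is nonzero - and the branch to the
 * nopage label is taken - unless _PAGE_PRESENT and _PAGE_READ were both
 * set. build_pte_writable() below uses the same trick for
 * _PAGE_PRESENT | _PAGE_WRITE.
 */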

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        uasm_il_bnez(p, r, pte, lid);
        iPTE_LW(p, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
                             | _PAGE_DIRTY);

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
                     unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_WRITE);
        uasm_il_beqz(p, r, pte, lid);
        iPTE_LW(p, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
        uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
        uasm_i_tlbwi(p);
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
                             struct uasm_reloc **r, unsigned int pte,
                             unsigned int tmp)
{
        uasm_i_mfc0(p, tmp, C0_INDEX);
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
        uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
        uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
        uasm_i_tlbwi(p); /* cp0 delay */
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
        uasm_l_r3000_write_probe_fail(l, *p);
        uasm_i_tlbwr(p); /* cp0 delay */
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
}

static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
                                   unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        uasm_i_mfc0(p, pte, C0_BADVADDR);
        uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        uasm_i_srl(p, pte, pte, 22); /* load delay */
        uasm_i_sll(p, pte, pte, 2);
        uasm_i_addu(p, ptr, ptr, pte);
        uasm_i_mfc0(p, pte, C0_CONTEXT);
        uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
        uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
        uasm_i_addu(p, ptr, ptr, pte);
        uasm_i_lw(p, pte, 0, ptr);
        uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbl, 0, sizeof(handle_tlbl));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
        uasm_i_nop(&p); /* load delay */
        build_make_valid(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbl(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbl) > FASTPATH_SIZE)
                panic("TLB load handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));

        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbs, 0, sizeof(handle_tlbs));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbs(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbs) > FASTPATH_SIZE)
                panic("TLB store handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));

        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbm, 0, sizeof(handle_tlbm));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_pte_reload_tlbwi(&p, K0, K1);

        uasm_l_nopage_tlbm(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbm) > FASTPATH_SIZE)
                panic("TLB modify handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));

        dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
                                   struct uasm_reloc **r, unsigned int pte,
                                   unsigned int ptr)
{
#ifdef CONFIG_64BIT
        build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
        build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
        /*
         * For huge tlb entries, pmd doesn't contain an address but
         * instead contains the tlb pte. Check the _PAGE_HUGE bit and
         * see if we need to jump to huge tlb processing.
         */
        build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

        UASM_i_MFC0(p, pte, C0_BADVADDR);
        UASM_i_LW(p, ptr, 0, ptr);
        UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
        uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
        UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
        uasm_l_smp_pgtable_change(l, *p);
#endif
        iPTE_LW(p, pte, ptr); /* get even pte */
        if (!m4kc_tlbp_war())
                build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
                                   struct uasm_reloc **r, unsigned int tmp,
                                   unsigned int ptr)
{
        /*
         * Round ptr down to the even PTE of the pair: ori sets the
         * sizeof(pte_t) bit, xori clears it again.
         */
        uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
        uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
        build_update_entries(p, tmp, ptr);
        build_tlb_write_entry(p, l, r, tlb_indexed);
        uasm_l_leave(l, *p);
        uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbl, 0, sizeof(handle_tlbl));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (bcm1250_m3_war()) {
                UASM_i_MFC0(&p, K0, C0_BADVADDR);
                UASM_i_MFC0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
                UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }

        build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        build_make_valid(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
        /*
         * This is the entry point when build_r4000_tlbchange_handler_head
         * spots a huge page.
         */
        uasm_l_tlb_huge_update(&l, p);
        iPTE_LW(&p, K0, K1);
        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
        build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

        uasm_l_nopage_tlbl(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbl) > FASTPATH_SIZE)
                panic("TLB load handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));

        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbs, 0, sizeof(handle_tlbs));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
        build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        build_make_write(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
        /*
         * This is the entry point when
         * build_r4000_tlbchange_handler_head spots a huge page.
         */
        uasm_l_tlb_huge_update(&l, p);
        iPTE_LW(&p, K0, K1);
        build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, K0, K0,
                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
        build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

        uasm_l_nopage_tlbs(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbs) > FASTPATH_SIZE)
                panic("TLB store handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));

        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbm, 0, sizeof(handle_tlbm));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
        build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        /* Present and writable bits set, set accessed and dirty bits. */
        build_make_write(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
        /*
         * This is the entry point when
         * build_r4000_tlbchange_handler_head spots a huge page.
         */
        uasm_l_tlb_huge_update(&l, p);
        iPTE_LW(&p, K0, K1);
        build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, K0, K0,
                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
        build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

        uasm_l_nopage_tlbm(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbm) > FASTPATH_SIZE)
                panic("TLB modify handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));

        dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
        /*
         * The refill handler is generated per CPU; multi-node systems
         * may have local storage for it. The other handlers are only
         * needed once.
         */
        static int run_once = 0;

        switch (current_cpu_type()) {
        case CPU_R2000:
        case CPU_R3000:
        case CPU_R3000A:
        case CPU_R3081E:
        case CPU_TX3912:
        case CPU_TX3922:
        case CPU_TX3927:
                build_r3000_tlb_refill_handler();
                if (!run_once) {
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
                        run_once++;
                }
                break;

        case CPU_R6000:
        case CPU_R6000A:
                panic("No R6000 TLB refill handler yet");
                break;

        case CPU_R8000:
                panic("No R8000 TLB refill handler yet");
                break;

        default:
                build_r4000_tlb_refill_handler();
                if (!run_once) {
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
                        run_once++;
                }
        }
}

void __cpuinit flush_tlb_handlers(void)
{
        local_flush_icache_range((unsigned long)handle_tlbl,
                                 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
        local_flush_icache_range((unsigned long)handle_tlbs,
                                 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
        local_flush_icache_range((unsigned long)handle_tlbm,
                                 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}