/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002 Ralf Baechle
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

#undef DEBUG_TLB

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER \
	__asm__ __volatile__( \
		".set push\n\t" \
		".set noreorder\n\t" \
		"nop\n\t" \
		".set pop\n\t")

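/*
 * TX39 cores provide a CP0 Wired register; plain R3000s do not, so the
 * code below falls back to treating the first 8 TLB entries as wired
 * when r3k_have_wired_reg is clear.
 */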
int r3k_have_wired_reg;	/* should be in cpu_data? */

/* TLB operations. */
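/*
 * Invalidate every non-wired entry: each entry is loaded with a zero
 * EntryLo0 and a unique address in KSEG0, which is unmapped and can
 * therefore never match a translation.  The R3000 Index register keeps
 * the index in bits 13:8, hence the "entry << 8" below.
 */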
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
	for (; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry << 8);
		write_c0_entryhi((entry | 0x80000) << 12);
		BARRIER;
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

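/*
 * Flushing a whole address space is done lazily: drop_mmu_context()
 * assigns the mm a new ASID, so stale entries tagged with the old ASID
 * can no longer match.
 */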
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
		printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif
		drop_mmu_context(mm, cpu);
	}
}

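/*
 * Flush a user address range by probing for each page under the mm's
 * ASID and overwriting any match with a non-matching KSEG0 entry.  If
 * the range spans more pages than the TLB has entries, dropping the
 * whole context is cheaper.
 */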
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

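/*
 * Same probe-and-invalidate loop for kernel mappings; the current
 * EntryHi (including the ASID) is saved and restored around the loop.
 */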
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

#ifdef DEBUG_TLB
	printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int pid = read_c0_entryhi();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += PAGE_SIZE;	/* BARRIER */
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entryhi(KSEG0);
			if (idx < 0)		/* BARRIER */
				continue;
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

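/*
 * Flush a single page: probe for it under the owning mm's ASID and, if
 * present, replace the entry with one that can never match.
 */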
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & ASID_MASK;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entryhi(KSEG0);
		if (idx < 0)			/* BARRIER */
			goto finish;
		tlb_write_indexed();

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

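/*
 * Install an updated PTE for a faulting address: probe for the page
 * under the current ASID and overwrite the matching entry, or write to
 * a random slot if there is no match.
 */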
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK)) ||
	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       (cpu_context(smp_processor_id(), vma->vm_mm)), pid);
	}
#endif

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_entryhi(address | pid);
	BARRIER;
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(pte_val(pte));
	write_c0_entryhi(address | pid);
	if (idx < 0) {			/* BARRIER */
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

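/*
 * Install a permanently resident mapping.  On TX39 the Wired register
 * and page mask are used; on a plain R3000, which has neither, the
 * entry goes into one of the first 8 slots.  entrylo1 is not used by
 * either path (the R3000 has a single EntryLo).
 */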
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long old_ctx;
	static unsigned long wired = 0;

	if (r3k_have_wired_reg) {	/* TX39XX */
		unsigned long old_pagemask;
		unsigned long w;

#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %08lx, hi %08lx, pagemask %08lx>]\n",
		       entrylo0, entryhi, pagemask);
#endif

		local_irq_save(flags);
		/* Save old context and create impossible VPN2 value */
		old_ctx = read_c0_entryhi() & ASID_MASK;
		old_pagemask = read_c0_pagemask();
		w = read_c0_wired();
		write_c0_wired(w + 1);
		write_c0_index(w << 8);
		write_c0_pagemask(pagemask);
		write_c0_entryhi(entryhi);
		write_c0_entrylo0(entrylo0);
		BARRIER;
		tlb_write_indexed();

		write_c0_entryhi(old_ctx);
		write_c0_pagemask(old_pagemask);
		local_flush_tlb_all();
		local_irq_restore(flags);

	} else if (wired < 8) {
#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %08lx, hi %08lx>]\n",
		       entrylo0, entryhi);
#endif

		local_irq_save(flags);
		old_ctx = read_c0_entryhi() & ASID_MASK;
		write_c0_entrylo0(entrylo0);
		write_c0_entryhi(entryhi);
		write_c0_index(wired << 8);	/* index field is bits 13:8 */
		wired++;			/* BARRIER */
		tlb_write_indexed();
		write_c0_entryhi(old_ctx);
		local_flush_tlb_all();
		local_irq_restore(flags);
	}
}

void tlb_init(void)
{
	local_flush_tlb_all();

	build_tlb_refill_handler();
}