/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ. If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

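/*
 * Under SMTC the TLB is shared between the thread contexts of a core, so
 * masking local interrupts alone is not enough: the critical section also
 * disables the other VPEs with dvpe() on entry and re-enables them with
 * evpe() on exit. ENTER_CRITICAL()/EXIT_CRITICAL() therefore must always
 * be used as a matching pair within a single block.
 */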
#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb;
 * unfortunately, the itlb is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
                write_c0_diag(4);
                break;
        default:
                break;
        }
}

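/* The ITLB caches only instruction fetches, so flush it only for executable mappings. */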
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_itlb();
}

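/*
 * Flush the entire TLB of the local CPU: every entry from the first
 * non-wired index up to the TLB size is overwritten with a unique,
 * unmapped VPN2, so no two entries can ever match the same address.
 */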
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        flush_itlb();
        EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/* All entries common to a mm share an asid. To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

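/*
 * Flush a user virtual address range belonging to one mm. Small ranges
 * are probed and invalidated page pair by page pair under the mm's ASID;
 * if the range covers more than half the TLB it is cheaper to drop the
 * mm's context and let a fresh ASID be allocated.
 */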
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                ENTER_CRITICAL(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= current_cpu_data.tlbsize/2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                flush_itlb();
                EXIT_CRITICAL(flags);
        }
}

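/*
 * Flush a kernel virtual address range. Kernel mappings are global, so
 * the probe does not need the current ASID; as above, ranges larger than
 * half the TLB fall back to a full local_flush_tlb_all().
 */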
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        flush_itlb();
        EXIT_CRITICAL(flags);
}

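/*
 * Flush the single TLB entry covering one user page. Each entry maps an
 * even/odd page pair, hence the PAGE_MASK << 1 alignment; the entry is
 * probed under the mm's ASID and, if present, overwritten with a unique
 * unmapped VPN2.
 */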
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                flush_itlb_vm(vma);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        flush_itlb();
        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed there.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* this could be a huge page */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        flush_itlb_vm(vma);
        EXIT_CRITICAL(flags);
}

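/*
 * Install a permanent ("wired") TLB entry with the given EntryLo pair,
 * EntryHi and page mask. The wired count is bumped so the new entry is
 * never selected for random replacement; the previous EntryHi and page
 * mask are restored afterwards.
 */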
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

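/*
 * Probe whether this TLB can express huge pages: write PM_HUGE_MASK to
 * the page mask register and check that it reads back unchanged.
 */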
int __init has_transparent_hugepage(void)
{
        unsigned int mask;
        unsigned long flags;

        ENTER_CRITICAL(flags);
        write_c0_pagemask(PM_HUGE_MASK);
        back_to_back_c0_hazard();
        mask = read_c0_pagemask();
        write_c0_pagemask(PM_DEFAULT_MASK);

        EXIT_CRITICAL(flags);

        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

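/*
 * "ntlb=N" on the kernel command line restricts the usable TLB to N
 * entries; the remaining entries are wired off in tlb_init() below.
 */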
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

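/*
 * Per-CPU TLB setup: program the default page mask, clear the wired
 * count, enable the RI/XI protection bits (and large physical addressing
 * on 64-bit) where available, flush all entries, apply any ntlb=
 * restriction and finally install the TLB refill handler.
 */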
void tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large physical
                 * addressing.
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        /* From this point on the ARC firmware is dead. */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS? */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired-1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}