/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

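/*
 * Illustrative sketch (the arch_pte_free_tlb name is hypothetical, not part
 * of this header): an architecture selecting CONFIG_HAVE_RCU_TABLE_FREE
 * hands its just-unlinked page-table pages to tlb_remove_table(), which
 * batches the pointers in an mmu_table_batch and frees them after an
 * RCU-sched grace period, falling back to one-at-a-time freeing via
 * tlb_remove_table_one() when the batch page cannot be allocated:
 *
 *	static inline void arch_pte_free_tlb(struct mmu_gather *tlb, void *table)
 *	{
 *		tlb_remove_table(tlb, table);
 *	}
 */
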
#endif

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
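
/*
 * Worked example (assuming 4K pages and 8-byte pointers, i.e. a common
 * 64-bit configuration): sizeof(struct mmu_gather_batch) is 16 bytes, so
 * MAX_GATHER_BATCH evaluates to (4096 - 16) / 8 = 510 page pointers per
 * batch page; if no batch page can be allocated we fall back to the
 * MMU_GATHER_BUNDLE (8) on-stack slots embedded in struct mmu_gather below.
 */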

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
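
/*
 * With the 4K-page/64-bit numbers above this works out to 10000 / 510 = 19
 * batch pages, so a single mmu_gather is forced to flush after collecting
 * roughly 10K pages even when batch-page allocations keep succeeding.
 */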

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	unsigned int		need_flush : 1,	/* Did free PTEs */
				/* we are in the middle of an operation to clear
				 * a full mm and can make some optimizations */
				fullmm : 1,
				/* we have performed an operation which
				 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
		    unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
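
/*
 * Minimal usage sketch (example_zap_one_page is hypothetical, not part of
 * the kernel): unmap paths drive the gather in three phases - tlb_gather_mmu()
 * to initialise, tlb_remove_page() for each page whose PTE has been cleared,
 * and tlb_finish_mmu() to flush the TLB and free everything gathered.
 */
static inline void example_zap_one_page(struct mm_struct *mm,
					struct page *page, unsigned long addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, addr, addr + PAGE_SIZE);
	/* ...the caller clears the PTE here, then queues the page... */
	tlb_remove_page(&tlb, page);
	tlb_finish_mmu(&tlb, addr, addr + PAGE_SIZE);
}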

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
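
/*
 * Illustrative sketch (hypothetical caller, not part of this header): a PTE
 * zap loop clears the entry first, records it for TLB invalidation, and only
 * then queues the underlying page for freeing:
 *
 *	pte = ptep_get_and_clear(mm, address, ptep);
 *	tlb_remove_tlb_entry(tlb, ptep, address);
 *	tlb_remove_page(tlb, pte_page(pte));
 */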

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
	} while (0)
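
/*
 * Illustrative sketch (hypothetical caller): a transparent-huge-page zap
 * path would use the pmd variant the same way, clearing the huge pmd before
 * queueing the compound page:
 *
 *	pmd = pmdp_get_and_clear(mm, address, pmdp);
 *	tlb_remove_pmd_tlb_entry(tlb, pmdp, address);
 *	tlb_remove_page(tlb, pmd_page(pmd));
 */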

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */