/*
 * linux/mm/mmu_notifier.c
 *
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright (C) 2008 SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * srcu_read_lock() here will block synchronize_srcu() in
	 * mmu_notifier_unregister() until all registered
	 * ->release() callouts this function makes have
	 * returned.
	 */
	id = srcu_read_lock(&srcu);
	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);

		/*
		 * Unlink. This will prevent mmu_notifier_unregister()
		 * from also making the ->release() callout.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

		/*
		 * Clear sptes. (see 'release' description in mmu_notifier.h)
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * All callouts to ->release() which we have done are complete.
	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete.
	 */
	srcu_read_unlock(&srcu, id);

	/*
	 * mmu_notifier_unregister() may have unlinked a notifier and may
	 * still be calling out to it. Additionally, other notifiers
	 * may have been active via vmtruncate() et al. Block here
	 * to ensure that all notifier callouts for this mm have been
	 * completed and the sptes are really cleaned up before returning
	 * to exit_mmap().
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
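
/*
 * A hedged illustration of the contract above (my_notifier and
 * my_zap_secondary_mapping are hypothetical names, not defined in this
 * file): a driver whose hardware lacks a young/accessed bit can
 * implement ->clear_flush_young() by tearing down its secondary mapping
 * and reporting whether one existed, e.g.:
 *
 *	static int my_clear_flush_young(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long address)
 *	{
 *		struct my_notifier *my;
 *
 *		my = container_of(mn, struct my_notifier, mn);
 *		return my_zap_secondary_mapping(my, address);
 *	}
 *
 * where my_zap_secondary_mapping() returns 1 if a mapping was present
 * (and is now gone) and 0 otherwise.
 */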

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
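
/*
 * Caller-side sketch (illustrative only, describing the usual pattern
 * in the core VM rather than anything defined in this file): pte
 * updates for a range are expected to be bracketed by the start/end
 * pair, via the mmu_notifier_invalidate_range_start()/_end() wrappers
 * from mmu_notifier.h, so notifiers can drop their secondary mappings
 * before the primary ptes change and rebuild them afterwards:
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	... clear or rewrite the ptes covering [start, end) ...
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 */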

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
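
/*
 * Minimal usage sketch of the rules above (my_ops, my_notifier and the
 * callback names are hypothetical): pin mm_users before registering,
 * drop the pin afterwards, and always pair with mmu_notifier_unregister:
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.clear_flush_young	= my_clear_flush_young,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 *
 *	struct mm_struct *mm = get_task_mm(task);	(mm_users pin)
 *	my_notifier.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_notifier, mm);
 *	mmput(mm);		(register took an mm_count pin for us)
 *	...
 *	mmu_notifier_unregister(&my_notifier, mm);	(drops mm_count)
 */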

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		int id;

		/*
		 * Ensure we synchronize up with __mmu_notifier_release().
		 */
		id = srcu_read_lock(&srcu);

		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

		if (mn->ops->release)
			mn->ops->release(mn, mm);

		/*
		 * Allow __mmu_notifier_release() to complete.
		 */
		srcu_read_unlock(&srcu, id);
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, including ->release() if it
	 * was run by __mmu_notifier_release() instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);