1 | /* |
2 | * Memory merging support. |
3 | * |
4 | * This code enables dynamic sharing of identical pages found in different |
5 | * memory areas, even if they are not shared by fork() |
6 | * |
7 | * Copyright (C) 2008-2009 Red Hat, Inc. |
8 | * Authors: |
9 | * Izik Eidus |
10 | * Andrea Arcangeli |
11 | * Chris Wright |
12 | * Hugh Dickins |
13 | * |
14 | * This work is licensed under the terms of the GNU GPL, version 2. |
15 | */ |
16 | |
17 | #include <linux/errno.h> |
18 | #include <linux/mm.h> |
19 | #include <linux/fs.h> |
20 | #include <linux/mman.h> |
21 | #include <linux/sched.h> |
22 | #include <linux/rwsem.h> |
23 | #include <linux/pagemap.h> |
24 | #include <linux/rmap.h> |
25 | #include <linux/spinlock.h> |
26 | #include <linux/jhash.h> |
27 | #include <linux/delay.h> |
28 | #include <linux/kthread.h> |
29 | #include <linux/wait.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/rbtree.h> |
32 | #include <linux/mmu_notifier.h> |
33 | #include <linux/swap.h> |
34 | #include <linux/ksm.h> |
35 | |
36 | #include <asm/tlbflush.h> |
37 | #include "internal.h" |
38 | |
39 | /* |
40 | * A few notes about the KSM scanning process, |
41 | * to make it easier to understand the data structures below: |
42 | * |
43 | * In order to reduce excessive scanning, KSM sorts the memory pages by their |
44 | * contents into a data structure that holds pointers to the pages' locations. |
45 | * |
46 | * Since the contents of the pages may change at any moment, KSM cannot just |
47 | * insert the pages into a normal sorted tree and expect it to find anything. |
48 | * Therefore KSM uses two data structures - the stable and the unstable tree. |
49 | * |
50 | * The stable tree holds pointers to all the merged pages (ksm pages), sorted |
51 | * by their contents. Because each such page is write-protected, searching on |
52 | * this tree is fully assured to be working (except when pages are unmapped), |
53 | * and therefore this tree is called the stable tree. |
54 | * |
55 | * In addition to the stable tree, KSM uses a second data structure called the |
56 | * unstable tree: this tree holds pointers to pages which have been found to |
57 | * be "unchanged for a period of time". The unstable tree sorts these pages |
58 | * by their contents, but since they are not write-protected, KSM cannot rely |
59 | * upon the unstable tree to work correctly - the unstable tree is liable to |
60 | * be corrupted as its contents are modified, and so it is called unstable. |
61 | * |
62 | * KSM solves this problem by several techniques: |
63 | * |
64 | * 1) The unstable tree is flushed every time KSM completes scanning all |
65 | * memory areas, and then the tree is rebuilt again from the beginning. |
66 | * 2) KSM will only insert into the unstable tree pages whose hash value |
67 | * has not changed since the previous scan of all memory areas. |
68 | * 3) The unstable tree is a red-black tree - so its balancing is based on the |
69 | * colors of the nodes and not on their contents, ensuring that even when |
70 | * the tree gets "corrupted" it won't get out of balance, so scanning time |
71 | * remains the same (also, searching and inserting nodes in an rbtree uses |
72 | * the same algorithm, so we have no overhead when we flush and rebuild). |
73 | * 4) KSM never flushes the stable tree, which means that even if it were to |
74 | * take 10 attempts to find a page in the unstable tree, once it is found, |
75 | * it is secured in the stable tree. (When we scan a new page, we first |
76 | * compare it against the stable tree, and then against the unstable tree.) |
77 | */ |
78 | |
79 | /** |
80 | * struct mm_slot - ksm information per mm that is being scanned |
81 | * @link: link to the mm_slots hash list |
82 | * @mm_list: link into the mm_slots list, rooted in ksm_mm_head |
83 | * @rmap_list: head for this mm_slot's list of rmap_items |
84 | * @mm: the mm that this information is valid for |
85 | */ |
86 | struct mm_slot { |
87 | struct hlist_node link; |
88 | struct list_head mm_list; |
89 | struct list_head rmap_list; |
90 | struct mm_struct *mm; |
91 | }; |
92 | |
93 | /** |
94 | * struct ksm_scan - cursor for scanning |
95 | * @mm_slot: the current mm_slot we are scanning |
96 | * @address: the next address inside that to be scanned |
97 | * @rmap_item: the current rmap that we are scanning inside the rmap_list |
98 | * @seqnr: count of completed full scans (needed when removing unstable node) |
99 | * |
100 | * There is only the one ksm_scan instance of this cursor structure. |
101 | */ |
102 | struct ksm_scan { |
103 | struct mm_slot *mm_slot; |
104 | unsigned long address; |
105 | struct rmap_item *rmap_item; |
106 | unsigned long seqnr; |
107 | }; |
108 | |
109 | /** |
110 | * struct rmap_item - reverse mapping item for virtual addresses |
111 | * @link: link into mm_slot's rmap_list (rmap_list is per mm) |
112 | * @mm: the memory structure this rmap_item is pointing into |
113 | * @address: the virtual address this rmap_item tracks (+ flags in low bits) |
114 | * @oldchecksum: previous checksum of the page at that virtual address |
115 | * @node: rb_node of this rmap_item in either unstable or stable tree |
116 | * @next: next rmap_item hanging off the same node of the stable tree |
117 | * @prev: previous rmap_item hanging off the same node of the stable tree |
118 | */ |
119 | struct rmap_item { |
120 | struct list_head link; |
121 | struct mm_struct *mm; |
122 | unsigned long address; /* + low bits used for flags below */ |
123 | union { |
124 | unsigned int oldchecksum; /* when unstable */ |
125 | struct rmap_item *next; /* when stable */ |
126 | }; |
127 | union { |
128 | struct rb_node node; /* when tree node */ |
129 | struct rmap_item *prev; /* in stable list */ |
130 | }; |
131 | }; |
132 | |
133 | #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ |
134 | #define NODE_FLAG 0x100 /* is a node of unstable or stable tree */ |
135 | #define STABLE_FLAG 0x200 /* is a node or list item of stable tree */ |
136 | |
137 | /* The stable and unstable tree heads */ |
138 | static struct rb_root root_stable_tree = RB_ROOT; |
139 | static struct rb_root root_unstable_tree = RB_ROOT; |
140 | |
141 | #define MM_SLOTS_HASH_HEADS 1024 |
142 | static struct hlist_head *mm_slots_hash; |
143 | |
144 | static struct mm_slot ksm_mm_head = { |
145 | .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list), |
146 | }; |
147 | static struct ksm_scan ksm_scan = { |
148 | .mm_slot = &ksm_mm_head, |
149 | }; |
150 | |
151 | static struct kmem_cache *rmap_item_cache; |
152 | static struct kmem_cache *mm_slot_cache; |
153 | |
154 | /* The number of nodes in the stable tree */ |
155 | static unsigned long ksm_pages_shared; |
156 | |
157 | /* The number of page slots additionally sharing those nodes */ |
158 | static unsigned long ksm_pages_sharing; |
159 | |
160 | /* The number of nodes in the unstable tree */ |
161 | static unsigned long ksm_pages_unshared; |
162 | |
163 | /* The number of rmap_items in use: to calculate pages_volatile */ |
164 | static unsigned long ksm_rmap_items; |
165 | |
166 | /* Limit on the number of unswappable pages used */ |
167 | static unsigned long ksm_max_kernel_pages; |
168 | |
169 | /* Number of pages ksmd should scan in one batch */ |
170 | static unsigned int ksm_thread_pages_to_scan = 100; |
171 | |
172 | /* Milliseconds ksmd should sleep between batches */ |
173 | static unsigned int ksm_thread_sleep_millisecs = 20; |
174 | |
175 | #define KSM_RUN_STOP 0 |
176 | #define KSM_RUN_MERGE 1 |
177 | #define KSM_RUN_UNMERGE 2 |
178 | static unsigned int ksm_run = KSM_RUN_STOP; |
179 | |
180 | static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); |
181 | static DEFINE_MUTEX(ksm_thread_mutex); |
182 | static DEFINE_SPINLOCK(ksm_mmlist_lock); |
183 | |
184 | #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ |
185 | sizeof(struct __struct), __alignof__(struct __struct),\ |
186 | (__flags), NULL) |
187 | |
188 | static int __init ksm_slab_init(void) |
189 | { |
190 | rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); |
191 | if (!rmap_item_cache) |
192 | goto out; |
193 | |
194 | mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); |
195 | if (!mm_slot_cache) |
196 | goto out_free; |
197 | |
198 | return 0; |
199 | |
200 | out_free: |
201 | kmem_cache_destroy(rmap_item_cache); |
202 | out: |
203 | return -ENOMEM; |
204 | } |
205 | |
206 | static void __init ksm_slab_free(void) |
207 | { |
208 | kmem_cache_destroy(mm_slot_cache); |
209 | kmem_cache_destroy(rmap_item_cache); |
210 | mm_slot_cache = NULL; |
211 | } |
212 | |
213 | static inline struct rmap_item *alloc_rmap_item(void) |
214 | { |
215 | struct rmap_item *rmap_item; |
216 | |
217 | rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); |
218 | if (rmap_item) |
219 | ksm_rmap_items++; |
220 | return rmap_item; |
221 | } |
222 | |
223 | static inline void free_rmap_item(struct rmap_item *rmap_item) |
224 | { |
225 | ksm_rmap_items--; |
226 | rmap_item->mm = NULL; /* debug safety */ |
227 | kmem_cache_free(rmap_item_cache, rmap_item); |
228 | } |
229 | |
230 | static inline struct mm_slot *alloc_mm_slot(void) |
231 | { |
232 | if (!mm_slot_cache) /* initialization failed */ |
233 | return NULL; |
234 | return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); |
235 | } |
236 | |
237 | static inline void free_mm_slot(struct mm_slot *mm_slot) |
238 | { |
239 | kmem_cache_free(mm_slot_cache, mm_slot); |
240 | } |
241 | |
242 | static int __init mm_slots_hash_init(void) |
243 | { |
244 | mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head), |
245 | GFP_KERNEL); |
246 | if (!mm_slots_hash) |
247 | return -ENOMEM; |
248 | return 0; |
249 | } |
250 | |
251 | static void __init mm_slots_hash_free(void) |
252 | { |
253 | kfree(mm_slots_hash); |
254 | } |
255 | |
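| /* |
| * get_mm_slot - look up mm's mm_slot in the mm_slots hash, returning |
| * NULL if this mm is not currently on ksmd's list. |
| */ |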
256 | static struct mm_slot *get_mm_slot(struct mm_struct *mm) |
257 | { |
258 | struct mm_slot *mm_slot; |
259 | struct hlist_head *bucket; |
260 | struct hlist_node *node; |
261 | |
262 | bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) |
263 | % MM_SLOTS_HASH_HEADS]; |
264 | hlist_for_each_entry(mm_slot, node, bucket, link) { |
265 | if (mm == mm_slot->mm) |
266 | return mm_slot; |
267 | } |
268 | return NULL; |
269 | } |
270 | |
271 | static void insert_to_mm_slots_hash(struct mm_struct *mm, |
272 | struct mm_slot *mm_slot) |
273 | { |
274 | struct hlist_head *bucket; |
275 | |
276 | bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) |
277 | % MM_SLOTS_HASH_HEADS]; |
278 | mm_slot->mm = mm; |
279 | INIT_LIST_HEAD(&mm_slot->rmap_list); |
280 | hlist_add_head(&mm_slot->link, bucket); |
281 | } |
282 | |
283 | static inline int in_stable_tree(struct rmap_item *rmap_item) |
284 | { |
285 | return rmap_item->address & STABLE_FLAG; |
286 | } |
287 | |
288 | /* |
289 | * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's |
290 | * page tables after it has passed through ksm_exit() - which, if necessary, |
291 | * takes mmap_sem briefly to serialize against them. ksm_exit() does not set |
292 | * a special flag: they can just back out as soon as mm_users goes to zero. |
293 | * ksm_test_exit() is used throughout to make this test for exit: in some |
294 | * places for correctness, in some places just to avoid unnecessary work. |
295 | */ |
296 | static inline bool ksm_test_exit(struct mm_struct *mm) |
297 | { |
298 | return atomic_read(&mm->mm_users) == 0; |
299 | } |
300 | |
301 | /* |
302 | * We use break_ksm to break COW on a ksm page: it's a stripped down |
303 | * |
304 | * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1) |
305 | * put_page(page); |
306 | * |
307 | * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, |
308 | * in case the application has unmapped and remapped mm,addr meanwhile. |
309 | * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP |
310 | * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. |
311 | */ |
312 | static int break_ksm(struct vm_area_struct *vma, unsigned long addr) |
313 | { |
314 | struct page *page; |
315 | int ret = 0; |
316 | |
317 | do { |
318 | cond_resched(); |
319 | page = follow_page(vma, addr, FOLL_GET); |
320 | if (!page) |
321 | break; |
322 | if (PageKsm(page)) |
323 | ret = handle_mm_fault(vma->vm_mm, vma, addr, |
324 | FAULT_FLAG_WRITE); |
325 | else |
326 | ret = VM_FAULT_WRITE; |
327 | put_page(page); |
328 | } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); |
329 | /* |
330 | * We must loop because handle_mm_fault() may back out if there's |
331 | * any difficulty e.g. if pte accessed bit gets updated concurrently. |
332 | * |
333 | * VM_FAULT_WRITE is what we have been hoping for: it indicates that |
334 | * COW has been broken, even if the vma does not permit VM_WRITE; |
335 | * but note that a concurrent fault might break PageKsm for us. |
336 | * |
337 | * VM_FAULT_SIGBUS could occur if we race with truncation of the |
338 | * backing file, which also invalidates anonymous pages: that's |
339 | * okay, that truncation will have unmapped the PageKsm for us. |
340 | * |
341 | * VM_FAULT_OOM: at the time of writing (late July 2009), setting |
342 | * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the |
343 | * current task has TIF_MEMDIE set, and will be OOM killed on return |
344 | * to user; and ksmd, having no mm, would never be chosen for that. |
345 | * |
346 | * But if the mm is in a limited mem_cgroup, then the fault may fail |
347 | * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and |
348 | * even ksmd can fail in this way - though it's usually breaking ksm |
349 | * just to undo a merge it made a moment before, so unlikely to oom. |
350 | * |
351 | * That's a pity: we might therefore have more kernel pages allocated |
352 | * than we're counting as nodes in the stable tree; but ksm_do_scan |
353 | * will retry to break_cow on each pass, so should recover the page |
354 | * in due course. The important thing is to not let VM_MERGEABLE |
355 | * be cleared while any such pages might remain in the area. |
356 | */ |
357 | return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; |
358 | } |
359 | |
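| /* |
| * break_cow - break COW on the ksm page mapped at addr in mm, provided |
| * the vma there is still VM_MERGEABLE and anonymous, so that the process |
| * gets its own copy of the page again. |
| */ |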
360 | static void break_cow(struct mm_struct *mm, unsigned long addr) |
361 | { |
362 | struct vm_area_struct *vma; |
363 | |
364 | down_read(&mm->mmap_sem); |
365 | if (ksm_test_exit(mm)) |
366 | goto out; |
367 | vma = find_vma(mm, addr); |
368 | if (!vma || vma->vm_start > addr) |
369 | goto out; |
370 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
371 | goto out; |
372 | break_ksm(vma, addr); |
373 | out: |
374 | up_read(&mm->mmap_sem); |
375 | } |
376 | |
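| /* |
| * get_mergeable_page - return the page at rmap_item's address with a |
| * reference held, but only if its vma is still VM_MERGEABLE and the page |
| * is anonymous; returns NULL otherwise. |
| */ |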
377 | static struct page *get_mergeable_page(struct rmap_item *rmap_item) |
378 | { |
379 | struct mm_struct *mm = rmap_item->mm; |
380 | unsigned long addr = rmap_item->address; |
381 | struct vm_area_struct *vma; |
382 | struct page *page; |
383 | |
384 | down_read(&mm->mmap_sem); |
385 | if (ksm_test_exit(mm)) |
386 | goto out; |
387 | vma = find_vma(mm, addr); |
388 | if (!vma || vma->vm_start > addr) |
389 | goto out; |
390 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
391 | goto out; |
392 | |
393 | page = follow_page(vma, addr, FOLL_GET); |
394 | if (!page) |
395 | goto out; |
396 | if (PageAnon(page)) { |
397 | flush_anon_page(vma, page, addr); |
398 | flush_dcache_page(page); |
399 | } else { |
400 | put_page(page); |
401 | out: page = NULL; |
402 | } |
403 | up_read(&mm->mmap_sem); |
404 | return page; |
405 | } |
406 | |
407 | /* |
408 | * get_ksm_page: checks if the page at the virtual address in rmap_item |
409 | * is still PageKsm, in which case we can trust the content of the page, |
410 | * and it returns the gotten page; but NULL if the page has been zapped. |
411 | */ |
412 | static struct page *get_ksm_page(struct rmap_item *rmap_item) |
413 | { |
414 | struct page *page; |
415 | |
416 | page = get_mergeable_page(rmap_item); |
417 | if (page && !PageKsm(page)) { |
418 | put_page(page); |
419 | page = NULL; |
420 | } |
421 | return page; |
422 | } |
423 | |
424 | /* |
425 | * Remove rmap_item from the stable or unstable tree: |
426 | * clean up its node or links and update the shared/sharing/unshared counts. |
427 | */ |
428 | static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) |
429 | { |
430 | if (in_stable_tree(rmap_item)) { |
431 | struct rmap_item *next_item = rmap_item->next; |
432 | |
433 | if (rmap_item->address & NODE_FLAG) { |
434 | if (next_item) { |
435 | rb_replace_node(&rmap_item->node, |
436 | &next_item->node, |
437 | &root_stable_tree); |
438 | next_item->address |= NODE_FLAG; |
439 | ksm_pages_sharing--; |
440 | } else { |
441 | rb_erase(&rmap_item->node, &root_stable_tree); |
442 | ksm_pages_shared--; |
443 | } |
444 | } else { |
445 | struct rmap_item *prev_item = rmap_item->prev; |
446 | |
447 | BUG_ON(prev_item->next != rmap_item); |
448 | prev_item->next = next_item; |
449 | if (next_item) { |
450 | BUG_ON(next_item->prev != rmap_item); |
451 | next_item->prev = rmap_item->prev; |
452 | } |
453 | ksm_pages_sharing--; |
454 | } |
455 | |
456 | rmap_item->next = NULL; |
457 | |
458 | } else if (rmap_item->address & NODE_FLAG) { |
459 | unsigned char age; |
460 | /* |
461 | * Usually ksmd can and must skip the rb_erase, because |
462 | * root_unstable_tree was already reset to RB_ROOT. |
463 | * But be careful when an mm is exiting: do the rb_erase |
464 | * if this rmap_item was inserted by this scan, rather |
465 | * than left over from before. |
466 | */ |
467 | age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); |
468 | BUG_ON(age > 1); |
469 | if (!age) |
470 | rb_erase(&rmap_item->node, &root_unstable_tree); |
471 | ksm_pages_unshared--; |
472 | } |
473 | |
474 | rmap_item->address &= PAGE_MASK; |
475 | |
476 | cond_resched(); /* we're called from many long loops */ |
477 | } |
478 | |
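| /* |
| * remove_trailing_rmap_items - unlink from the trees and free all the |
| * rmap_items from cur to the end of this mm_slot's rmap_list. |
| */ |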
479 | static void remove_trailing_rmap_items(struct mm_slot *mm_slot, |
480 | struct list_head *cur) |
481 | { |
482 | struct rmap_item *rmap_item; |
483 | |
484 | while (cur != &mm_slot->rmap_list) { |
485 | rmap_item = list_entry(cur, struct rmap_item, link); |
486 | cur = cur->next; |
487 | remove_rmap_item_from_tree(rmap_item); |
488 | list_del(&rmap_item->link); |
489 | free_rmap_item(rmap_item); |
490 | } |
491 | } |
492 | |
493 | /* |
494 | * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather |
495 | * than check every pte of a given vma, the locking doesn't quite work for |
496 | * that - an rmap_item is assigned to the stable tree after inserting ksm |
497 | * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing |
498 | * rmap_items from parent to child at fork time (so as not to waste time |
499 | * if exit comes before the next scan reaches it). |
500 | * |
501 | * Similarly, although we'd like to remove rmap_items (so updating counts |
502 | * and freeing memory) when unmerging an area, it's easier to leave that |
503 | * to the next pass of ksmd - consider, for example, how ksmd might be |
504 | * in cmp_and_merge_page on one of the rmap_items we would be removing. |
505 | */ |
506 | static int unmerge_ksm_pages(struct vm_area_struct *vma, |
507 | unsigned long start, unsigned long end) |
508 | { |
509 | unsigned long addr; |
510 | int err = 0; |
511 | |
512 | for (addr = start; addr < end && !err; addr += PAGE_SIZE) { |
513 | if (ksm_test_exit(vma->vm_mm)) |
514 | break; |
515 | if (signal_pending(current)) |
516 | err = -ERESTARTSYS; |
517 | else |
518 | err = break_ksm(vma, addr); |
519 | } |
520 | return err; |
521 | } |
522 | |
523 | #ifdef CONFIG_SYSFS |
524 | /* |
525 | * Only called through the sysfs control interface: |
526 | */ |
527 | static int unmerge_and_remove_all_rmap_items(void) |
528 | { |
529 | struct mm_slot *mm_slot; |
530 | struct mm_struct *mm; |
531 | struct vm_area_struct *vma; |
532 | int err = 0; |
533 | |
534 | spin_lock(&ksm_mmlist_lock); |
535 | ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, |
536 | struct mm_slot, mm_list); |
537 | spin_unlock(&ksm_mmlist_lock); |
538 | |
539 | for (mm_slot = ksm_scan.mm_slot; |
540 | mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { |
541 | mm = mm_slot->mm; |
542 | down_read(&mm->mmap_sem); |
543 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
544 | if (ksm_test_exit(mm)) |
545 | break; |
546 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
547 | continue; |
548 | err = unmerge_ksm_pages(vma, |
549 | vma->vm_start, vma->vm_end); |
550 | if (err) |
551 | goto error; |
552 | } |
553 | |
554 | remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next); |
555 | |
556 | spin_lock(&ksm_mmlist_lock); |
557 | ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, |
558 | struct mm_slot, mm_list); |
559 | if (ksm_test_exit(mm)) { |
560 | hlist_del(&mm_slot->link); |
561 | list_del(&mm_slot->mm_list); |
562 | spin_unlock(&ksm_mmlist_lock); |
563 | |
564 | free_mm_slot(mm_slot); |
565 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
566 | up_read(&mm->mmap_sem); |
567 | mmdrop(mm); |
568 | } else { |
569 | spin_unlock(&ksm_mmlist_lock); |
570 | up_read(&mm->mmap_sem); |
571 | } |
572 | } |
573 | |
574 | ksm_scan.seqnr = 0; |
575 | return 0; |
576 | |
577 | error: |
578 | up_read(&mm->mmap_sem); |
579 | spin_lock(&ksm_mmlist_lock); |
580 | ksm_scan.mm_slot = &ksm_mm_head; |
581 | spin_unlock(&ksm_mmlist_lock); |
582 | return err; |
583 | } |
584 | #endif /* CONFIG_SYSFS */ |
585 | |
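| /* |
| * calc_checksum - hash the page contents: used to notice pages whose |
| * contents are still changing from one scan to the next. |
| */ |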
586 | static u32 calc_checksum(struct page *page) |
587 | { |
588 | u32 checksum; |
589 | void *addr = kmap_atomic(page, KM_USER0); |
590 | checksum = jhash2(addr, PAGE_SIZE / 4, 17); |
591 | kunmap_atomic(addr, KM_USER0); |
592 | return checksum; |
593 | } |
594 | |
595 | static int memcmp_pages(struct page *page1, struct page *page2) |
596 | { |
597 | char *addr1, *addr2; |
598 | int ret; |
599 | |
600 | addr1 = kmap_atomic(page1, KM_USER0); |
601 | addr2 = kmap_atomic(page2, KM_USER1); |
602 | ret = memcmp(addr1, addr2, PAGE_SIZE); |
603 | kunmap_atomic(addr2, KM_USER1); |
604 | kunmap_atomic(addr1, KM_USER0); |
605 | return ret; |
606 | } |
607 | |
608 | static inline int pages_identical(struct page *page1, struct page *page2) |
609 | { |
610 | return !memcmp_pages(page1, page2); |
611 | } |
612 | |
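| /* |
| * write_protect_page - write-protect the pte mapping page in vma, after |
| * checking that no O_DIRECT or similar I/O holds an extra reference to |
| * the page; *orig_pte records the pte for replace_page() to check against. |
| * Returns 0 on success, -EFAULT if the pte cannot be found or the page |
| * count check fails. |
| */ |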
613 | static int write_protect_page(struct vm_area_struct *vma, struct page *page, |
614 | pte_t *orig_pte) |
615 | { |
616 | struct mm_struct *mm = vma->vm_mm; |
617 | unsigned long addr; |
618 | pte_t *ptep; |
619 | spinlock_t *ptl; |
620 | int swapped; |
621 | int err = -EFAULT; |
622 | |
623 | addr = page_address_in_vma(page, vma); |
624 | if (addr == -EFAULT) |
625 | goto out; |
626 | |
627 | ptep = page_check_address(page, mm, addr, &ptl, 0); |
628 | if (!ptep) |
629 | goto out; |
630 | |
631 | if (pte_write(*ptep)) { |
632 | pte_t entry; |
633 | |
634 | swapped = PageSwapCache(page); |
635 | flush_cache_page(vma, addr, page_to_pfn(page)); |
636 | /* |
637 | * Ok this is tricky: when get_user_pages_fast() runs it doesn't |
638 | * take any lock, so the check we are about to make, comparing |
639 | * the page count against the map count, is racy: O_DIRECT can |
640 | * start right after the check. |
641 | * So we clear the pte and flush the TLB before the check: |
642 | * this assures us that no O_DIRECT can begin after the check |
643 | * or in the middle of the check. |
644 | */ |
645 | entry = ptep_clear_flush(vma, addr, ptep); |
646 | /* |
647 | * Check that no O_DIRECT or similar I/O is in progress on the |
648 | * page |
649 | */ |
650 | if ((page_mapcount(page) + 2 + swapped) != page_count(page)) { |
651 | set_pte_at_notify(mm, addr, ptep, entry); |
652 | goto out_unlock; |
653 | } |
654 | entry = pte_wrprotect(entry); |
655 | set_pte_at_notify(mm, addr, ptep, entry); |
656 | } |
657 | *orig_pte = *ptep; |
658 | err = 0; |
659 | |
660 | out_unlock: |
661 | pte_unmap_unlock(ptep, ptl); |
662 | out: |
663 | return err; |
664 | } |
665 | |
666 | /** |
667 | * replace_page - replace page in vma by new ksm page |
668 | * @vma: vma that holds the pte pointing to oldpage |
669 | * @oldpage: the page we are replacing by newpage |
670 | * @newpage: the ksm page we replace oldpage by |
671 | * @orig_pte: the original value of the pte |
672 | * |
673 | * Returns 0 on success, -EFAULT on failure. |
674 | */ |
675 | static int replace_page(struct vm_area_struct *vma, struct page *oldpage, |
676 | struct page *newpage, pte_t orig_pte) |
677 | { |
678 | struct mm_struct *mm = vma->vm_mm; |
679 | pgd_t *pgd; |
680 | pud_t *pud; |
681 | pmd_t *pmd; |
682 | pte_t *ptep; |
683 | spinlock_t *ptl; |
684 | unsigned long addr; |
685 | pgprot_t prot; |
686 | int err = -EFAULT; |
687 | |
688 | prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE); |
689 | |
690 | addr = page_address_in_vma(oldpage, vma); |
691 | if (addr == -EFAULT) |
692 | goto out; |
693 | |
694 | pgd = pgd_offset(mm, addr); |
695 | if (!pgd_present(*pgd)) |
696 | goto out; |
697 | |
698 | pud = pud_offset(pgd, addr); |
699 | if (!pud_present(*pud)) |
700 | goto out; |
701 | |
702 | pmd = pmd_offset(pud, addr); |
703 | if (!pmd_present(*pmd)) |
704 | goto out; |
705 | |
706 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
707 | if (!pte_same(*ptep, orig_pte)) { |
708 | pte_unmap_unlock(ptep, ptl); |
709 | goto out; |
710 | } |
711 | |
712 | get_page(newpage); |
713 | page_add_ksm_rmap(newpage); |
714 | |
715 | flush_cache_page(vma, addr, pte_pfn(*ptep)); |
716 | ptep_clear_flush(vma, addr, ptep); |
717 | set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot)); |
718 | |
719 | page_remove_rmap(oldpage); |
720 | put_page(oldpage); |
721 | |
722 | pte_unmap_unlock(ptep, ptl); |
723 | err = 0; |
724 | out: |
725 | return err; |
726 | } |
727 | |
728 | /* |
729 | * try_to_merge_one_page - take two pages and merge them into one |
730 | * @vma: the vma that hold the pte pointing into oldpage |
731 | * @oldpage: the page that we want to replace with newpage |
732 | * @newpage: the page that we want to map instead of oldpage |
733 | * |
734 | * Note: |
735 | * oldpage should be a PageAnon page, while newpage should be a PageKsm page, |
736 | * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm. |
737 | * |
738 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
739 | */ |
740 | static int try_to_merge_one_page(struct vm_area_struct *vma, |
741 | struct page *oldpage, |
742 | struct page *newpage) |
743 | { |
744 | pte_t orig_pte = __pte(0); |
745 | int err = -EFAULT; |
746 | |
747 | if (!(vma->vm_flags & VM_MERGEABLE)) |
748 | goto out; |
749 | |
750 | if (!PageAnon(oldpage)) |
751 | goto out; |
752 | |
753 | get_page(newpage); |
754 | get_page(oldpage); |
755 | |
756 | /* |
757 | * We need the page lock to read a stable PageSwapCache in |
758 | * write_protect_page(). We use trylock_page() instead of |
759 | * lock_page() because we don't want to wait here - we |
760 | * prefer to continue scanning and merging different pages, |
761 | * then come back to this page when it is unlocked. |
762 | */ |
763 | if (!trylock_page(oldpage)) |
764 | goto out_putpage; |
765 | /* |
766 | * If this anonymous page is mapped only here, its pte may need |
767 | * to be write-protected. If it's mapped elsewhere, all of its |
768 | * ptes are necessarily already write-protected. But in either |
769 | * case, we need to lock and check page_count is not raised. |
770 | */ |
771 | if (write_protect_page(vma, oldpage, &orig_pte) == 0 && |
772 | pages_identical(oldpage, newpage)) |
773 | err = replace_page(vma, oldpage, newpage, orig_pte); |
774 | |
775 | if ((vma->vm_flags & VM_LOCKED) && !err) |
776 | munlock_vma_page(oldpage); |
777 | |
778 | unlock_page(oldpage); |
779 | out_putpage: |
780 | put_page(oldpage); |
781 | put_page(newpage); |
782 | out: |
783 | return err; |
784 | } |
785 | |
786 | /* |
787 | * try_to_merge_with_ksm_page - like try_to_merge_two_pages, |
788 | * but no new kernel page is allocated: kpage must already be a ksm page. |
789 | */ |
790 | static int try_to_merge_with_ksm_page(struct mm_struct *mm1, |
791 | unsigned long addr1, |
792 | struct page *page1, |
793 | struct page *kpage) |
794 | { |
795 | struct vm_area_struct *vma; |
796 | int err = -EFAULT; |
797 | |
798 | down_read(&mm1->mmap_sem); |
799 | if (ksm_test_exit(mm1)) |
800 | goto out; |
801 | |
802 | vma = find_vma(mm1, addr1); |
803 | if (!vma || vma->vm_start > addr1) |
804 | goto out; |
805 | |
806 | err = try_to_merge_one_page(vma, page1, kpage); |
807 | out: |
808 | up_read(&mm1->mmap_sem); |
809 | return err; |
810 | } |
811 | |
812 | /* |
813 | * try_to_merge_two_pages - take two identical pages and prepare them |
814 | * to be merged into one page. |
815 | * |
816 | * This function returns 0 if we successfully mapped two identical pages |
817 | * into one page, -EFAULT otherwise. |
818 | * |
819 | * Note that this function allocates a new kernel page: if one of the pages |
820 | * is already a ksm page, try_to_merge_with_ksm_page should be used. |
821 | */ |
822 | static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1, |
823 | struct page *page1, struct mm_struct *mm2, |
824 | unsigned long addr2, struct page *page2) |
825 | { |
826 | struct vm_area_struct *vma; |
827 | struct page *kpage; |
828 | int err = -EFAULT; |
829 | |
830 | /* |
831 | * The number of nodes in the stable tree |
832 | * is the number of kernel pages that we hold. |
833 | */ |
834 | if (ksm_max_kernel_pages && |
835 | ksm_max_kernel_pages <= ksm_pages_shared) |
836 | return err; |
837 | |
838 | kpage = alloc_page(GFP_HIGHUSER); |
839 | if (!kpage) |
840 | return err; |
841 | |
842 | down_read(&mm1->mmap_sem); |
843 | if (ksm_test_exit(mm1)) { |
844 | up_read(&mm1->mmap_sem); |
845 | goto out; |
846 | } |
847 | vma = find_vma(mm1, addr1); |
848 | if (!vma || vma->vm_start > addr1) { |
849 | up_read(&mm1->mmap_sem); |
850 | goto out; |
851 | } |
852 | |
853 | copy_user_highpage(kpage, page1, addr1, vma); |
854 | err = try_to_merge_one_page(vma, page1, kpage); |
855 | up_read(&mm1->mmap_sem); |
856 | |
857 | if (!err) { |
858 | err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage); |
859 | /* |
860 | * If that fails, we have a ksm page with only one pte |
861 | * pointing to it: so break it. |
862 | */ |
863 | if (err) |
864 | break_cow(mm1, addr1); |
865 | } |
866 | out: |
867 | put_page(kpage); |
868 | return err; |
869 | } |
870 | |
871 | /* |
872 | * stable_tree_search - search page inside the stable tree |
873 | * @page: the page that we are searching identical pages to. |
874 | * @page2: pointer into identical page that we are holding inside the stable |
875 | * tree that we have found. |
876 | * @rmap_item: the reverse mapping item |
877 | * |
878 | * This function checks if there is a page inside the stable tree |
879 | * with identical content to the page that we are scanning right now. |
880 | * |
881 | * This function returns a pointer to the identical rmap_item if found, |
882 | * NULL otherwise. |
883 | */ |
884 | static struct rmap_item *stable_tree_search(struct page *page, |
885 | struct page **page2, |
886 | struct rmap_item *rmap_item) |
887 | { |
888 | struct rb_node *node = root_stable_tree.rb_node; |
889 | |
890 | while (node) { |
891 | struct rmap_item *tree_rmap_item, *next_rmap_item; |
892 | int ret; |
893 | |
894 | tree_rmap_item = rb_entry(node, struct rmap_item, node); |
895 | while (tree_rmap_item) { |
896 | BUG_ON(!in_stable_tree(tree_rmap_item)); |
897 | cond_resched(); |
898 | page2[0] = get_ksm_page(tree_rmap_item); |
899 | if (page2[0]) |
900 | break; |
901 | next_rmap_item = tree_rmap_item->next; |
902 | remove_rmap_item_from_tree(tree_rmap_item); |
903 | tree_rmap_item = next_rmap_item; |
904 | } |
905 | if (!tree_rmap_item) |
906 | return NULL; |
907 | |
908 | ret = memcmp_pages(page, page2[0]); |
909 | |
910 | if (ret < 0) { |
911 | put_page(page2[0]); |
912 | node = node->rb_left; |
913 | } else if (ret > 0) { |
914 | put_page(page2[0]); |
915 | node = node->rb_right; |
916 | } else { |
917 | return tree_rmap_item; |
918 | } |
919 | } |
920 | |
921 | return NULL; |
922 | } |
923 | |
924 | /* |
925 | * stable_tree_insert - insert rmap_item pointing to new ksm page |
926 | * into the stable tree. |
927 | * |
928 | * @page: the page that we are searching identical page to inside the stable |
929 | * tree. |
930 | * @rmap_item: pointer to the reverse mapping item. |
931 | * |
932 | * This function returns rmap_item on success, NULL otherwise. |
933 | */ |
934 | static struct rmap_item *stable_tree_insert(struct page *page, |
935 | struct rmap_item *rmap_item) |
936 | { |
937 | struct rb_node **new = &root_stable_tree.rb_node; |
938 | struct rb_node *parent = NULL; |
939 | |
940 | while (*new) { |
941 | struct rmap_item *tree_rmap_item, *next_rmap_item; |
942 | struct page *tree_page; |
943 | int ret; |
944 | |
945 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
946 | while (tree_rmap_item) { |
947 | BUG_ON(!in_stable_tree(tree_rmap_item)); |
948 | cond_resched(); |
949 | tree_page = get_ksm_page(tree_rmap_item); |
950 | if (tree_page) |
951 | break; |
952 | next_rmap_item = tree_rmap_item->next; |
953 | remove_rmap_item_from_tree(tree_rmap_item); |
954 | tree_rmap_item = next_rmap_item; |
955 | } |
956 | if (!tree_rmap_item) |
957 | return NULL; |
958 | |
959 | ret = memcmp_pages(page, tree_page); |
960 | put_page(tree_page); |
961 | |
962 | parent = *new; |
963 | if (ret < 0) |
964 | new = &parent->rb_left; |
965 | else if (ret > 0) |
966 | new = &parent->rb_right; |
967 | else { |
968 | /* |
969 | * It is not a bug that stable_tree_search() didn't |
970 | * find this node: because at that time our page was |
971 | * not yet write-protected, so may have changed since. |
972 | */ |
973 | return NULL; |
974 | } |
975 | } |
976 | |
977 | rmap_item->address |= NODE_FLAG | STABLE_FLAG; |
978 | rmap_item->next = NULL; |
979 | rb_link_node(&rmap_item->node, parent, new); |
980 | rb_insert_color(&rmap_item->node, &root_stable_tree); |
981 | |
982 | ksm_pages_shared++; |
983 | return rmap_item; |
984 | } |
985 | |
986 | /* |
987 | * unstable_tree_search_insert - search and insert items into the unstable tree. |
988 | * |
989 | * @page: the page that we are going to search for identical page or to insert |
990 | * into the unstable tree |
991 | * @page2: pointer into identical page that was found inside the unstable tree |
992 | * @rmap_item: the reverse mapping item of page |
993 | * |
994 | * This function searches for a page in the unstable tree identical to the |
995 | * page currently being scanned; and if no identical page is found in the |
996 | * tree, we insert rmap_item as a new object into the unstable tree. |
997 | * |
998 | * This function returns a pointer to the rmap_item found to be identical |
999 | * to the currently scanned page, NULL otherwise. |
1000 | * |
1001 | * This function does both searching and inserting, because they share |
1002 | * the same walking algorithm in an rbtree. |
1003 | */ |
1004 | static struct rmap_item *unstable_tree_search_insert(struct page *page, |
1005 | struct page **page2, |
1006 | struct rmap_item *rmap_item) |
1007 | { |
1008 | struct rb_node **new = &root_unstable_tree.rb_node; |
1009 | struct rb_node *parent = NULL; |
1010 | |
1011 | while (*new) { |
1012 | struct rmap_item *tree_rmap_item; |
1013 | int ret; |
1014 | |
1015 | cond_resched(); |
1016 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
1017 | page2[0] = get_mergeable_page(tree_rmap_item); |
1018 | if (!page2[0]) |
1019 | return NULL; |
1020 | |
1021 | /* |
1022 | * Don't substitute an unswappable ksm page |
1023 | * just for one good swappable forked page. |
1024 | */ |
1025 | if (page == page2[0]) { |
1026 | put_page(page2[0]); |
1027 | return NULL; |
1028 | } |
1029 | |
1030 | ret = memcmp_pages(page, page2[0]); |
1031 | |
1032 | parent = *new; |
1033 | if (ret < 0) { |
1034 | put_page(page2[0]); |
1035 | new = &parent->rb_left; |
1036 | } else if (ret > 0) { |
1037 | put_page(page2[0]); |
1038 | new = &parent->rb_right; |
1039 | } else { |
1040 | return tree_rmap_item; |
1041 | } |
1042 | } |
1043 | |
1044 | rmap_item->address |= NODE_FLAG; |
1045 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
1046 | rb_link_node(&rmap_item->node, parent, new); |
1047 | rb_insert_color(&rmap_item->node, &root_unstable_tree); |
1048 | |
1049 | ksm_pages_unshared++; |
1050 | return NULL; |
1051 | } |
1052 | |
1053 | /* |
1054 | * stable_tree_append - add another rmap_item to the linked list of |
1055 | * rmap_items hanging off a given node of the stable tree, all sharing |
1056 | * the same ksm page. |
1057 | */ |
1058 | static void stable_tree_append(struct rmap_item *rmap_item, |
1059 | struct rmap_item *tree_rmap_item) |
1060 | { |
1061 | rmap_item->next = tree_rmap_item->next; |
1062 | rmap_item->prev = tree_rmap_item; |
1063 | |
1064 | if (tree_rmap_item->next) |
1065 | tree_rmap_item->next->prev = rmap_item; |
1066 | |
1067 | tree_rmap_item->next = rmap_item; |
1068 | rmap_item->address |= STABLE_FLAG; |
1069 | |
1070 | ksm_pages_sharing++; |
1071 | } |
1072 | |
1073 | /* |
1074 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
1075 | * if not, compare checksum to previous and if it's the same, see if page can |
1076 | * be inserted into the unstable tree, or merged with a page already there and |
1077 | * both transferred to the stable tree. |
1078 | * |
1079 | * @page: the page that we are searching identical page to. |
1080 | * @rmap_item: the reverse mapping into the virtual address of this page |
1081 | */ |
1082 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) |
1083 | { |
1084 | struct page *page2[1]; |
1085 | struct rmap_item *tree_rmap_item; |
1086 | unsigned int checksum; |
1087 | int err; |
1088 | |
1089 | if (in_stable_tree(rmap_item)) |
1090 | remove_rmap_item_from_tree(rmap_item); |
1091 | |
1092 | /* We first start with searching the page inside the stable tree */ |
1093 | tree_rmap_item = stable_tree_search(page, page2, rmap_item); |
1094 | if (tree_rmap_item) { |
1095 | if (page == page2[0]) /* forked */ |
1096 | err = 0; |
1097 | else |
1098 | err = try_to_merge_with_ksm_page(rmap_item->mm, |
1099 | rmap_item->address, |
1100 | page, page2[0]); |
1101 | put_page(page2[0]); |
1102 | |
1103 | if (!err) { |
1104 | /* |
1105 | * The page was successfully merged: |
1106 | * add its rmap_item to the stable tree. |
1107 | */ |
1108 | stable_tree_append(rmap_item, tree_rmap_item); |
1109 | } |
1110 | return; |
1111 | } |
1112 | |
1113 | /* |
1114 | * A ksm page might have got here by fork, but its other |
1115 | * references have already been removed from the stable tree. |
1116 | * Or it might be left over from a break_ksm which failed |
1117 | * when the mem_cgroup had reached its limit: try again now. |
1118 | */ |
1119 | if (PageKsm(page)) |
1120 | break_cow(rmap_item->mm, rmap_item->address); |
1121 | |
1122 | /* |
1123 | * If the hash value of the page has changed since the last time we |
1124 | * calculated it, this page is being changed frequently: therefore we |
1125 | * don't want to insert it into the unstable tree, and we don't want |
1126 | * to waste our time searching for something identical to it there. |
1127 | */ |
1128 | checksum = calc_checksum(page); |
1129 | if (rmap_item->oldchecksum != checksum) { |
1130 | rmap_item->oldchecksum = checksum; |
1131 | return; |
1132 | } |
1133 | |
1134 | tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item); |
1135 | if (tree_rmap_item) { |
1136 | err = try_to_merge_two_pages(rmap_item->mm, |
1137 | rmap_item->address, page, |
1138 | tree_rmap_item->mm, |
1139 | tree_rmap_item->address, page2[0]); |
1140 | /* |
1141 | * As soon as we merge this page, we want to remove the |
1142 | * rmap_item of the page we have merged with from the unstable |
1143 | * tree, and insert it instead as new node in the stable tree. |
1144 | */ |
1145 | if (!err) { |
1146 | rb_erase(&tree_rmap_item->node, &root_unstable_tree); |
1147 | tree_rmap_item->address &= ~NODE_FLAG; |
1148 | ksm_pages_unshared--; |
1149 | |
1150 | /* |
1151 | * If we fail to insert the page into the stable tree, |
1152 | * we will have 2 virtual addresses that are pointing |
1153 | * to a ksm page left outside the stable tree, |
1154 | * in which case we need to break_cow on both. |
1155 | */ |
1156 | if (stable_tree_insert(page2[0], tree_rmap_item)) |
1157 | stable_tree_append(rmap_item, tree_rmap_item); |
1158 | else { |
1159 | break_cow(tree_rmap_item->mm, |
1160 | tree_rmap_item->address); |
1161 | break_cow(rmap_item->mm, rmap_item->address); |
1162 | } |
1163 | } |
1164 | |
1165 | put_page(page2[0]); |
1166 | } |
1167 | } |
1168 | |
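| /* |
| * get_next_rmap_item - walk this mm_slot's rmap_list forward to addr, |
| * freeing any stale rmap_items passed over on the way, and return the |
| * rmap_item for addr, allocating a new one if none exists yet. |
| */ |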
1169 | static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, |
1170 | struct list_head *cur, |
1171 | unsigned long addr) |
1172 | { |
1173 | struct rmap_item *rmap_item; |
1174 | |
1175 | while (cur != &mm_slot->rmap_list) { |
1176 | rmap_item = list_entry(cur, struct rmap_item, link); |
1177 | if ((rmap_item->address & PAGE_MASK) == addr) { |
1178 | if (!in_stable_tree(rmap_item)) |
1179 | remove_rmap_item_from_tree(rmap_item); |
1180 | return rmap_item; |
1181 | } |
1182 | if (rmap_item->address > addr) |
1183 | break; |
1184 | cur = cur->next; |
1185 | remove_rmap_item_from_tree(rmap_item); |
1186 | list_del(&rmap_item->link); |
1187 | free_rmap_item(rmap_item); |
1188 | } |
1189 | |
1190 | rmap_item = alloc_rmap_item(); |
1191 | if (rmap_item) { |
1192 | /* It has already been zeroed */ |
1193 | rmap_item->mm = mm_slot->mm; |
1194 | rmap_item->address = addr; |
1195 | list_add_tail(&rmap_item->link, cur); |
1196 | } |
1197 | return rmap_item; |
1198 | } |
1199 | |
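| /* |
| * scan_get_next_rmap_item - advance the scan cursor to the next anonymous |
| * page of a VM_MERGEABLE area, returning its rmap_item and, in *page, the |
| * page itself with a reference held; returns NULL once a full scan of all |
| * registered mms has been completed. |
| */ |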
1200 | static struct rmap_item *scan_get_next_rmap_item(struct page **page) |
1201 | { |
1202 | struct mm_struct *mm; |
1203 | struct mm_slot *slot; |
1204 | struct vm_area_struct *vma; |
1205 | struct rmap_item *rmap_item; |
1206 | |
1207 | if (list_empty(&ksm_mm_head.mm_list)) |
1208 | return NULL; |
1209 | |
1210 | slot = ksm_scan.mm_slot; |
1211 | if (slot == &ksm_mm_head) { |
1212 | root_unstable_tree = RB_ROOT; |
1213 | |
1214 | spin_lock(&ksm_mmlist_lock); |
1215 | slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); |
1216 | ksm_scan.mm_slot = slot; |
1217 | spin_unlock(&ksm_mmlist_lock); |
1218 | next_mm: |
1219 | ksm_scan.address = 0; |
1220 | ksm_scan.rmap_item = list_entry(&slot->rmap_list, |
1221 | struct rmap_item, link); |
1222 | } |
1223 | |
1224 | mm = slot->mm; |
1225 | down_read(&mm->mmap_sem); |
1226 | if (ksm_test_exit(mm)) |
1227 | vma = NULL; |
1228 | else |
1229 | vma = find_vma(mm, ksm_scan.address); |
1230 | |
1231 | for (; vma; vma = vma->vm_next) { |
1232 | if (!(vma->vm_flags & VM_MERGEABLE)) |
1233 | continue; |
1234 | if (ksm_scan.address < vma->vm_start) |
1235 | ksm_scan.address = vma->vm_start; |
1236 | if (!vma->anon_vma) |
1237 | ksm_scan.address = vma->vm_end; |
1238 | |
1239 | while (ksm_scan.address < vma->vm_end) { |
1240 | if (ksm_test_exit(mm)) |
1241 | break; |
1242 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
1243 | if (*page && PageAnon(*page)) { |
1244 | flush_anon_page(vma, *page, ksm_scan.address); |
1245 | flush_dcache_page(*page); |
1246 | rmap_item = get_next_rmap_item(slot, |
1247 | ksm_scan.rmap_item->link.next, |
1248 | ksm_scan.address); |
1249 | if (rmap_item) { |
1250 | ksm_scan.rmap_item = rmap_item; |
1251 | ksm_scan.address += PAGE_SIZE; |
1252 | } else |
1253 | put_page(*page); |
1254 | up_read(&mm->mmap_sem); |
1255 | return rmap_item; |
1256 | } |
1257 | if (*page) |
1258 | put_page(*page); |
1259 | ksm_scan.address += PAGE_SIZE; |
1260 | cond_resched(); |
1261 | } |
1262 | } |
1263 | |
1264 | if (ksm_test_exit(mm)) { |
1265 | ksm_scan.address = 0; |
1266 | ksm_scan.rmap_item = list_entry(&slot->rmap_list, |
1267 | struct rmap_item, link); |
1268 | } |
1269 | /* |
1270 | * Nuke all the rmap_items that are above this current rmap: |
1271 | * because there were no VM_MERGEABLE vmas with such addresses. |
1272 | */ |
1273 | remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next); |
1274 | |
1275 | spin_lock(&ksm_mmlist_lock); |
1276 | ksm_scan.mm_slot = list_entry(slot->mm_list.next, |
1277 | struct mm_slot, mm_list); |
1278 | if (ksm_scan.address == 0) { |
1279 | /* |
1280 | * We've completed a full scan of all vmas, holding mmap_sem |
1281 | * throughout, and found no VM_MERGEABLE: so do the same as |
1282 | * __ksm_exit does to remove this mm from all our lists now. |
1283 | * This applies either when cleaning up after __ksm_exit |
1284 | * (but beware: we can reach here even before __ksm_exit), |
1285 | * or when all VM_MERGEABLE areas have been unmapped (and |
1286 | * mmap_sem then protects against race with MADV_MERGEABLE). |
1287 | */ |
1288 | hlist_del(&slot->link); |
1289 | list_del(&slot->mm_list); |
1290 | spin_unlock(&ksm_mmlist_lock); |
1291 | |
1292 | free_mm_slot(slot); |
1293 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
1294 | up_read(&mm->mmap_sem); |
1295 | mmdrop(mm); |
1296 | } else { |
1297 | spin_unlock(&ksm_mmlist_lock); |
1298 | up_read(&mm->mmap_sem); |
1299 | } |
1300 | |
1301 | /* Repeat until we've completed scanning the whole list */ |
1302 | slot = ksm_scan.mm_slot; |
1303 | if (slot != &ksm_mm_head) |
1304 | goto next_mm; |
1305 | |
1306 | ksm_scan.seqnr++; |
1307 | return NULL; |
1308 | } |
1309 | |
1310 | /** |
1311 | * ksm_do_scan - the ksm scanner main worker function. |
1312 | * @scan_npages - number of pages we want to scan before we return. |
1313 | */ |
1314 | static void ksm_do_scan(unsigned int scan_npages) |
1315 | { |
1316 | struct rmap_item *rmap_item; |
1317 | struct page *page; |
1318 | |
1319 | while (scan_npages--) { |
1320 | cond_resched(); |
1321 | rmap_item = scan_get_next_rmap_item(&page); |
1322 | if (!rmap_item) |
1323 | return; |
1324 | if (!PageKsm(page) || !in_stable_tree(rmap_item)) |
1325 | cmp_and_merge_page(page, rmap_item); |
1326 | else if (page_mapcount(page) == 1) { |
1327 | /* |
1328 | * Replace now-unshared ksm page by ordinary page. |
1329 | */ |
1330 | break_cow(rmap_item->mm, rmap_item->address); |
1331 | remove_rmap_item_from_tree(rmap_item); |
1332 | rmap_item->oldchecksum = calc_checksum(page); |
1333 | } |
1334 | put_page(page); |
1335 | } |
1336 | } |
1337 | |
1338 | static int ksmd_should_run(void) |
1339 | { |
1340 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); |
1341 | } |
1342 | |
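| /* |
| * ksm_scan_thread - ksmd's main loop: scan a batch of pages, then sleep, |
| * or wait until there is work to do or the thread is told to stop. |
| */ |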
1343 | static int ksm_scan_thread(void *nothing) |
1344 | { |
1345 | set_user_nice(current, 5); |
1346 | |
1347 | while (!kthread_should_stop()) { |
1348 | mutex_lock(&ksm_thread_mutex); |
1349 | if (ksmd_should_run()) |
1350 | ksm_do_scan(ksm_thread_pages_to_scan); |
1351 | mutex_unlock(&ksm_thread_mutex); |
1352 | |
1353 | if (ksmd_should_run()) { |
1354 | schedule_timeout_interruptible( |
1355 | msecs_to_jiffies(ksm_thread_sleep_millisecs)); |
1356 | } else { |
1357 | wait_event_interruptible(ksm_thread_wait, |
1358 | ksmd_should_run() || kthread_should_stop()); |
1359 | } |
1360 | } |
1361 | return 0; |
1362 | } |
1363 | |
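| /* |
| * For reference, a minimal sketch of how userspace reaches this entry |
| * point (illustrative only, not part of this file): an anonymous mapping |
| * is advised MADV_MERGEABLE, and ksmd is started via sysfs: |
| * |
| *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, |
| *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
| *	madvise(p, len, MADV_MERGEABLE); |
| * |
| *	# echo 1 > /sys/kernel/mm/ksm/run |
| */ |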
1364 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
1365 | unsigned long end, int advice, unsigned long *vm_flags) |
1366 | { |
1367 | struct mm_struct *mm = vma->vm_mm; |
1368 | int err; |
1369 | |
1370 | switch (advice) { |
1371 | case MADV_MERGEABLE: |
1372 | /* |
1373 | * Be somewhat over-protective for now! |
1374 | */ |
1375 | if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | |
1376 | VM_PFNMAP | VM_IO | VM_DONTEXPAND | |
1377 | VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | |
1378 | VM_MIXEDMAP | VM_SAO)) |
1379 | return 0; /* just ignore the advice */ |
1380 | |
1381 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
1382 | err = __ksm_enter(mm); |
1383 | if (err) |
1384 | return err; |
1385 | } |
1386 | |
1387 | *vm_flags |= VM_MERGEABLE; |
1388 | break; |
1389 | |
1390 | case MADV_UNMERGEABLE: |
1391 | if (!(*vm_flags & VM_MERGEABLE)) |
1392 | return 0; /* just ignore the advice */ |
1393 | |
1394 | if (vma->anon_vma) { |
1395 | err = unmerge_ksm_pages(vma, start, end); |
1396 | if (err) |
1397 | return err; |
1398 | } |
1399 | |
1400 | *vm_flags &= ~VM_MERGEABLE; |
1401 | break; |
1402 | } |
1403 | |
1404 | return 0; |
1405 | } |
1406 | |
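| /* |
| * __ksm_enter - allocate an mm_slot for this mm, hash it, and add it just |
| * behind ksmd's scanning cursor, waking ksmd if it was idle for lack of |
| * work. |
| */ |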
1407 | int __ksm_enter(struct mm_struct *mm) |
1408 | { |
1409 | struct mm_slot *mm_slot; |
1410 | int needs_wakeup; |
1411 | |
1412 | mm_slot = alloc_mm_slot(); |
1413 | if (!mm_slot) |
1414 | return -ENOMEM; |
1415 | |
1416 | /* Check ksm_run too? Would need tighter locking */ |
1417 | needs_wakeup = list_empty(&ksm_mm_head.mm_list); |
1418 | |
1419 | spin_lock(&ksm_mmlist_lock); |
1420 | insert_to_mm_slots_hash(mm, mm_slot); |
1421 | /* |
1422 | * Insert just behind the scanning cursor, to let the area settle |
1423 | * down a little; when fork is followed by immediate exec, we don't |
1424 | * want ksmd to waste time setting up and tearing down an rmap_list. |
1425 | */ |
1426 | list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); |
1427 | spin_unlock(&ksm_mmlist_lock); |
1428 | |
1429 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
1430 | atomic_inc(&mm->mm_count); |
1431 | |
1432 | if (needs_wakeup) |
1433 | wake_up_interruptible(&ksm_thread_wait); |
1434 | |
1435 | return 0; |
1436 | } |
1437 | |
1438 | void __ksm_exit(struct mm_struct *mm) |
1439 | { |
1440 | struct mm_slot *mm_slot; |
1441 | int easy_to_free = 0; |
1442 | |
1443 | /* |
1444 | * This process is exiting: if it's straightforward (as is the |
1445 | * case when ksmd was never running), free mm_slot immediately. |
1446 | * But if it's at the cursor or has rmap_items linked to it, use |
1447 | * mmap_sem to synchronize with any break_cows before pagetables |
1448 | * are freed, and leave the mm_slot on the list for ksmd to free. |
1449 | * Beware: ksm may already have noticed it exiting and freed the slot. |
1450 | */ |
1451 | |
1452 | spin_lock(&ksm_mmlist_lock); |
1453 | mm_slot = get_mm_slot(mm); |
1454 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
1455 | if (list_empty(&mm_slot->rmap_list)) { |
1456 | hlist_del(&mm_slot->link); |
1457 | list_del(&mm_slot->mm_list); |
1458 | easy_to_free = 1; |
1459 | } else { |
1460 | list_move(&mm_slot->mm_list, |
1461 | &ksm_scan.mm_slot->mm_list); |
1462 | } |
1463 | } |
1464 | spin_unlock(&ksm_mmlist_lock); |
1465 | |
1466 | if (easy_to_free) { |
1467 | free_mm_slot(mm_slot); |
1468 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
1469 | mmdrop(mm); |
1470 | } else if (mm_slot) { |
1471 | down_write(&mm->mmap_sem); |
1472 | up_write(&mm->mmap_sem); |
1473 | } |
1474 | } |
1475 | |
1476 | #ifdef CONFIG_SYSFS |
1477 | /* |
1478 | * This all compiles without CONFIG_SYSFS, but is a waste of space. |
1479 | */ |
1480 | |
1481 | #define KSM_ATTR_RO(_name) \ |
1482 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
1483 | #define KSM_ATTR(_name) \ |
1484 | static struct kobj_attribute _name##_attr = \ |
1485 | __ATTR(_name, 0644, _name##_show, _name##_store) |
1486 | |
1487 | static ssize_t sleep_millisecs_show(struct kobject *kobj, |
1488 | struct kobj_attribute *attr, char *buf) |
1489 | { |
1490 | return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); |
1491 | } |
1492 | |
1493 | static ssize_t sleep_millisecs_store(struct kobject *kobj, |
1494 | struct kobj_attribute *attr, |
1495 | const char *buf, size_t count) |
1496 | { |
1497 | unsigned long msecs; |
1498 | int err; |
1499 | |
1500 | err = strict_strtoul(buf, 10, &msecs); |
1501 | if (err || msecs > UINT_MAX) |
1502 | return -EINVAL; |
1503 | |
1504 | ksm_thread_sleep_millisecs = msecs; |
1505 | |
1506 | return count; |
1507 | } |
1508 | KSM_ATTR(sleep_millisecs); |
1509 | |
1510 | static ssize_t pages_to_scan_show(struct kobject *kobj, |
1511 | struct kobj_attribute *attr, char *buf) |
1512 | { |
1513 | return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); |
1514 | } |
1515 | |
1516 | static ssize_t pages_to_scan_store(struct kobject *kobj, |
1517 | struct kobj_attribute *attr, |
1518 | const char *buf, size_t count) |
1519 | { |
1520 | int err; |
1521 | unsigned long nr_pages; |
1522 | |
1523 | err = strict_strtoul(buf, 10, &nr_pages); |
1524 | if (err || nr_pages > UINT_MAX) |
1525 | return -EINVAL; |
1526 | |
1527 | ksm_thread_pages_to_scan = nr_pages; |
1528 | |
1529 | return count; |
1530 | } |
1531 | KSM_ATTR(pages_to_scan); |
1532 | |
1533 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, |
1534 | char *buf) |
1535 | { |
1536 | return sprintf(buf, "%u\n", ksm_run); |
1537 | } |
1538 | |
1539 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, |
1540 | const char *buf, size_t count) |
1541 | { |
1542 | int err; |
1543 | unsigned long flags; |
1544 | |
1545 | err = strict_strtoul(buf, 10, &flags); |
1546 | if (err || flags > UINT_MAX) |
1547 | return -EINVAL; |
1548 | if (flags > KSM_RUN_UNMERGE) |
1549 | return -EINVAL; |
1550 | |
1551 | /* |
1552 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. |
1553 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, |
1554 | * breaking COW to free the unswappable pages_shared (but leaves |
1555 | * mm_slots on the list for when ksmd may be set running again). |
1556 | */ |
1557 | |
1558 | mutex_lock(&ksm_thread_mutex); |
1559 | if (ksm_run != flags) { |
1560 | ksm_run = flags; |
1561 | if (flags & KSM_RUN_UNMERGE) { |
1562 | current->flags |= PF_OOM_ORIGIN; |
1563 | err = unmerge_and_remove_all_rmap_items(); |
1564 | current->flags &= ~PF_OOM_ORIGIN; |
1565 | if (err) { |
1566 | ksm_run = KSM_RUN_STOP; |
1567 | count = err; |
1568 | } |
1569 | } |
1570 | } |
1571 | mutex_unlock(&ksm_thread_mutex); |
1572 | |
1573 | if (flags & KSM_RUN_MERGE) |
1574 | wake_up_interruptible(&ksm_thread_wait); |
1575 | |
1576 | return count; |
1577 | } |
1578 | KSM_ATTR(run); |
1579 | |
1580 | static ssize_t max_kernel_pages_store(struct kobject *kobj, |
1581 | struct kobj_attribute *attr, |
1582 | const char *buf, size_t count) |
1583 | { |
1584 | int err; |
1585 | unsigned long nr_pages; |
1586 | |
1587 | err = strict_strtoul(buf, 10, &nr_pages); |
1588 | if (err) |
1589 | return -EINVAL; |
1590 | |
1591 | ksm_max_kernel_pages = nr_pages; |
1592 | |
1593 | return count; |
1594 | } |
1595 | |
1596 | static ssize_t max_kernel_pages_show(struct kobject *kobj, |
1597 | struct kobj_attribute *attr, char *buf) |
1598 | { |
1599 | return sprintf(buf, "%lu\n", ksm_max_kernel_pages); |
1600 | } |
1601 | KSM_ATTR(max_kernel_pages); |
1602 | |
1603 | static ssize_t pages_shared_show(struct kobject *kobj, |
1604 | struct kobj_attribute *attr, char *buf) |
1605 | { |
1606 | return sprintf(buf, "%lu\n", ksm_pages_shared); |
1607 | } |
1608 | KSM_ATTR_RO(pages_shared); |
1609 | |
1610 | static ssize_t pages_sharing_show(struct kobject *kobj, |
1611 | struct kobj_attribute *attr, char *buf) |
1612 | { |
1613 | return sprintf(buf, "%lu\n", ksm_pages_sharing); |
1614 | } |
1615 | KSM_ATTR_RO(pages_sharing); |
1616 | |
1617 | static ssize_t pages_unshared_show(struct kobject *kobj, |
1618 | struct kobj_attribute *attr, char *buf) |
1619 | { |
1620 | return sprintf(buf, "%lu\n", ksm_pages_unshared); |
1621 | } |
1622 | KSM_ATTR_RO(pages_unshared); |
1623 | |
1624 | static ssize_t pages_volatile_show(struct kobject *kobj, |
1625 | struct kobj_attribute *attr, char *buf) |
1626 | { |
1627 | long ksm_pages_volatile; |
1628 | |
1629 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared |
1630 | - ksm_pages_sharing - ksm_pages_unshared; |
1631 | /* |
1632 | * It was not worth any locking to calculate that statistic, |
1633 | * but it might therefore sometimes be negative: conceal that. |
1634 | */ |
1635 | if (ksm_pages_volatile < 0) |
1636 | ksm_pages_volatile = 0; |
1637 | return sprintf(buf, "%ld\n", ksm_pages_volatile); |
1638 | } |
1639 | KSM_ATTR_RO(pages_volatile); |
1640 | |
1641 | static ssize_t full_scans_show(struct kobject *kobj, |
1642 | struct kobj_attribute *attr, char *buf) |
1643 | { |
1644 | return sprintf(buf, "%lu\n", ksm_scan.seqnr); |
1645 | } |
1646 | KSM_ATTR_RO(full_scans); |
1647 | |
1648 | static struct attribute *ksm_attrs[] = { |
1649 | &sleep_millisecs_attr.attr, |
1650 | &pages_to_scan_attr.attr, |
1651 | &run_attr.attr, |
1652 | &max_kernel_pages_attr.attr, |
1653 | &pages_shared_attr.attr, |
1654 | &pages_sharing_attr.attr, |
1655 | &pages_unshared_attr.attr, |
1656 | &pages_volatile_attr.attr, |
1657 | &full_scans_attr.attr, |
1658 | NULL, |
1659 | }; |
1660 | |
1661 | static struct attribute_group ksm_attr_group = { |
1662 | .attrs = ksm_attrs, |
1663 | .name = "ksm", |
1664 | }; |
1665 | #endif /* CONFIG_SYSFS */ |
1666 | |
1667 | static int __init ksm_init(void) |
1668 | { |
1669 | struct task_struct *ksm_thread; |
1670 | int err; |
1671 | |
1672 | ksm_max_kernel_pages = totalram_pages / 4; |
1673 | |
1674 | err = ksm_slab_init(); |
1675 | if (err) |
1676 | goto out; |
1677 | |
1678 | err = mm_slots_hash_init(); |
1679 | if (err) |
1680 | goto out_free1; |
1681 | |
1682 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
1683 | if (IS_ERR(ksm_thread)) { |
1684 | printk(KERN_ERR "ksm: creating kthread failed\n"); |
1685 | err = PTR_ERR(ksm_thread); |
1686 | goto out_free2; |
1687 | } |
1688 | |
1689 | #ifdef CONFIG_SYSFS |
1690 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
1691 | if (err) { |
1692 | printk(KERN_ERR "ksm: register sysfs failed\n"); |
1693 | kthread_stop(ksm_thread); |
1694 | goto out_free2; |
1695 | } |
1696 | #else |
1697 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ |
1698 | |
1699 | #endif /* CONFIG_SYSFS */ |
1700 | |
1701 | return 0; |
1702 | |
1703 | out_free2: |
1704 | mm_slots_hash_free(); |
1705 | out_free1: |
1706 | ksm_slab_free(); |
1707 | out: |
1708 | return err; |
1709 | } |
1710 | module_init(ksm_init) |
1711 |