/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

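/* Pick the page at the tail of the passed LRU list. */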
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

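	/*
	 * hugetlb pages are not mapped with normal ptes: look up the huge
	 * pte directly and use the per-mm page_table_lock that guards it.
	 */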
	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

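	/*
	 * Migration entries are encoded as non-present swap ptes; make sure
	 * this one still refers to the page we are migrating.
	 */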
	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even if it fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

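	/*
	 * Two references are expected: the page cache's and the one taken
	 * when the page was isolated, plus one more if fs-private data
	 * (buffers) is attached.
	 */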
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
	    radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

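	/*
	 * Freeze the refcount at zero so no new references can be taken
	 * while the radix tree slot is switched over to the new page.
	 */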
	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
	    radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

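	/*
	 * All state has been transferred to the new page: disconnect the
	 * old page from the swap cache, fs-private data and its mapping.
	 */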
	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

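	/*
	 * Grab a reference on each buffer and lock it so that no I/O can
	 * be started against the buffers while they move to the new page.
	 */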
	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
					int remap_swapcache)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto move_newpage;

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto move_newpage;

		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page. The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force || !sync)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(). File caches may use write_page() or lock_page() in
	 * migration, so we only need to care about anon pages here.
	 */
	if (PageAnon(page)) {
		/*
		 * Only page_lock_anon_vma() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_lock_anon_vma(page);
		if (anon_vma) {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 *    and treated as swapcache but it has no rmap yet.
	 *    Calling try_to_unmap() against a page->mapping==NULL page will
	 *    trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 *    fs-private metadata. The page can be picked up due to memory
	 *    offlining. Everywhere else except page reclaim, the page is
	 *    invisible to the vm, so the page can not be migrated. So try to
	 *    free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

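	/*
	 * On failure, point any migration ptes back at the original page
	 * (swapcache pages with remap_swapcache == 0 stay unmapped).
	 */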
	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		drop_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);

move_newpage:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || !sync)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		anon_vma = page_lock_anon_vma(hpage);
		if (anon_vma) {
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		drop_anon_vma(anon_vma);
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore.
 * Caller should call putback_lru_pages to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining,
						sync);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (rc)
		return rc;

	return nr_failed + retry;
}

int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining,
					sync);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

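	/*
	 * Scan the private page_to_node array for the entry describing this
	 * page; the array is terminated by an entry with node == MAX_NUMNODES.
	 */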
	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0, true);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

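	/*
	 * Process the user arrays in small fixed-size chunks so the
	 * temporary copies fit on the kernel stack.
	 */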
	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	mm = get_task_mm(task);
	rcu_read_unlock();

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif