Root/mm/vmscan.c

1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmstat.h>
23#include <linux/file.h>
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/buffer_head.h> /* for try_to_release_page(),
27                    buffer_heads_over_limit */
28#include <linux/mm_inline.h>
29#include <linux/pagevec.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/compaction.h>
36#include <linux/notifier.h>
37#include <linux/rwsem.h>
38#include <linux/delay.h>
39#include <linux/kthread.h>
40#include <linux/freezer.h>
41#include <linux/memcontrol.h>
42#include <linux/delayacct.h>
43#include <linux/sysctl.h>
44#include <linux/oom.h>
45#include <linux/prefetch.h>
46
47#include <asm/tlbflush.h>
48#include <asm/div64.h>
49
50#include <linux/swapops.h>
51
52#include "internal.h"
53
54#define CREATE_TRACE_POINTS
55#include <trace/events/vmscan.h>
56
57/*
58 * reclaim_mode determines how the inactive list is shrunk
59 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
60 * RECLAIM_MODE_ASYNC: Do not block
61 * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
62 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
63 * page from the LRU and reclaim all pages within a
64 * naturally aligned range
65 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
66 * order-0 pages and then compact the zone
67 */
68typedef unsigned __bitwise__ reclaim_mode_t;
69#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
70#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
71#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
72#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
73#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
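
The flag bits above are meant to be OR-ed together; a hedged illustration of the
combinations this file actually uses (see set_reclaim_mode() below):

/*
 * Illustrative combinations:
 *   plain order-0 reclaim, non-blocking:
 *       RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC
 *   lumpy reclaim that may wait on writeback:
 *       RECLAIM_MODE_LUMPYRECLAIM | RECLAIM_MODE_SYNC
 * Consumers test individual bits, e.g.
 *       if (sc->reclaim_mode & RECLAIM_MODE_SYNC)
 *               wait_on_page_writeback(page);
 */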
74
75struct scan_control {
76    /* Incremented by the number of inactive pages that were scanned */
77    unsigned long nr_scanned;
78
79    /* Number of pages freed so far during a call to shrink_zones() */
80    unsigned long nr_reclaimed;
81
82    /* How many pages shrink_list() should reclaim */
83    unsigned long nr_to_reclaim;
84
85    unsigned long hibernation_mode;    /* set when reclaiming for hibernation */
86
87    /* This context's GFP mask */
88    gfp_t gfp_mask;
89
90    int may_writepage;    /* Can dirty pages be written out during reclaim? */
91
92    /* Can mapped pages be reclaimed? */
93    int may_unmap;
94
95    /* Can pages be swapped as part of reclaim? */
96    int may_swap;
97
98    int swappiness;    /* Swappiness (0..100) used for this reclaim */
99
100    int order;    /* Order of the allocation we are reclaiming for */
101
102    /*
103     * Intend to reclaim enough contiguous memory rather than just a
104     * sufficient amount of memory, i.e. the mode for high-order allocations.
105     */
106    reclaim_mode_t reclaim_mode;
107
108    /* Which cgroup do we reclaim from */
109    struct mem_cgroup *mem_cgroup;
110
111    /*
112     * Nodemask of nodes allowed by the caller. If NULL, all nodes
113     * are scanned.
114     */
115    nodemask_t *nodemask;
116};
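
As a sketch of how these fields are typically filled in, here is roughly the
kind of initializer a global direct-reclaim entry point (such as
try_to_free_pages() later in this file) uses; gfp_mask, order and nodemask come
from the caller and the exact field values are illustrative:

	struct scan_control sc = {
		.gfp_mask	= gfp_mask,		/* caller's allocation mask */
		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
		.may_writepage	= !laptop_mode,
		.may_unmap	= 1,
		.may_swap	= 1,
		.swappiness	= vm_swappiness,
		.order		= order,		/* caller's allocation order */
		.mem_cgroup	= NULL,			/* global (non-cgroup) reclaim */
		.nodemask	= nodemask,		/* NULL means all nodes */
	};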
117
118#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
119
120#ifdef ARCH_HAS_PREFETCH
121#define prefetch_prev_lru_page(_page, _base, _field) \
122    do { \
123        if ((_page)->lru.prev != _base) { \
124            struct page *prev; \
125                                    \
126            prev = lru_to_page(&(_page->lru)); \
127            prefetch(&prev->_field); \
128        } \
129    } while (0)
130#else
131#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
132#endif
133
134#ifdef ARCH_HAS_PREFETCHW
135#define prefetchw_prev_lru_page(_page, _base, _field) \
136    do { \
137        if ((_page)->lru.prev != _base) { \
138            struct page *prev; \
139                                    \
140            prev = lru_to_page(&(_page->lru)); \
141            prefetchw(&prev->_field); \
142        } \
143    } while (0)
144#else
145#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
146#endif
147
148/*
149 * From 0 .. 100. Higher means more swappy.
150 */
151int vm_swappiness = 60;
152long vm_total_pages; /* The total number of pages which the VM controls */
153
154static LIST_HEAD(shrinker_list);
155static DECLARE_RWSEM(shrinker_rwsem);
156
157#ifdef CONFIG_CGROUP_MEM_RES_CTLR
158#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
159#else
160#define scanning_global_lru(sc) (1)
161#endif
162
163static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
164                          struct scan_control *sc)
165{
166    if (!scanning_global_lru(sc))
167        return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
168
169    return &zone->reclaim_stat;
170}
171
172static unsigned long zone_nr_lru_pages(struct zone *zone,
173                struct scan_control *sc, enum lru_list lru)
174{
175    if (!scanning_global_lru(sc))
176        return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
177
178    return zone_page_state(zone, NR_LRU_BASE + lru);
179}
180
181
182/*
183 * Add a shrinker callback to be called from the vm
184 */
185void register_shrinker(struct shrinker *shrinker)
186{
187    shrinker->nr = 0;
188    down_write(&shrinker_rwsem);
189    list_add_tail(&shrinker->list, &shrinker_list);
190    up_write(&shrinker_rwsem);
191}
192EXPORT_SYMBOL(register_shrinker);
193
194/*
195 * Remove one
196 */
197void unregister_shrinker(struct shrinker *shrinker)
198{
199    down_write(&shrinker_rwsem);
200    list_del(&shrinker->list);
201    up_write(&shrinker_rwsem);
202}
203EXPORT_SYMBOL(unregister_shrinker);
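
For reference, a minimal (hypothetical) shrinker module built on the two calls
above; the cache, its object counter and all names are made up, and a real
shrinker would need locking around its object count:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>

/* Pretend we own a cache with this many freeable objects. */
static unsigned long demo_nr_objects = 1000;

static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		unsigned long nr = sc->nr_to_scan;

		if (nr > demo_nr_objects)
			nr = demo_nr_objects;
		demo_nr_objects -= nr;	/* free 'nr' objects here */
	}
	/* Always report how many freeable objects remain. */
	return demo_nr_objects;
}

static struct shrinker demo_shrinker = {
	.shrink	= demo_shrink,
	.seeks	= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	register_shrinker(&demo_shrinker);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");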
204
205static inline int do_shrinker_shrink(struct shrinker *shrinker,
206                     struct shrink_control *sc,
207                     unsigned long nr_to_scan)
208{
209    sc->nr_to_scan = nr_to_scan;
210    return (*shrinker->shrink)(shrinker, sc);
211}
212
213#define SHRINK_BATCH 128
214/*
215 * Call the shrink functions to age shrinkable caches
216 *
217 * Here we assume it costs one seek to replace a lru page and that it also
218 * takes a seek to recreate a cache object. With this in mind we age equal
219 * percentages of the lru and ageable caches. This should balance the seeks
220 * generated by these structures.
221 *
222 * If the vm encountered mapped pages on the LRU it increases the pressure on
223 * slab to avoid swapping.
224 *
225 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
226 *
227 * `lru_pages' represents the number of on-LRU pages in all the zones which
228 * are eligible for the caller's allocation attempt. It is used for balancing
229 * slab reclaim versus page reclaim.
230 *
231 * Returns the number of slab objects which we shrunk.
232 */
233unsigned long shrink_slab(struct shrink_control *shrink,
234              unsigned long nr_pages_scanned,
235              unsigned long lru_pages)
236{
237    struct shrinker *shrinker;
238    unsigned long ret = 0;
239
240    if (nr_pages_scanned == 0)
241        nr_pages_scanned = SWAP_CLUSTER_MAX;
242
243    if (!down_read_trylock(&shrinker_rwsem)) {
244        /* Assume we'll be able to shrink next time */
245        ret = 1;
246        goto out;
247    }
248
249    list_for_each_entry(shrinker, &shrinker_list, list) {
250        unsigned long long delta;
251        unsigned long total_scan;
252        unsigned long max_pass;
253
254        max_pass = do_shrinker_shrink(shrinker, shrink, 0);
255        delta = (4 * nr_pages_scanned) / shrinker->seeks;
256        delta *= max_pass;
257        do_div(delta, lru_pages + 1);
258        shrinker->nr += delta;
259        if (shrinker->nr < 0) {
260            printk(KERN_ERR "shrink_slab: %pF negative objects to "
261                   "delete nr=%ld\n",
262                   shrinker->shrink, shrinker->nr);
263            shrinker->nr = max_pass;
264        }
265
266        /*
267         * Avoid risking looping forever due to a too large nr value:
268         * never try to free more than twice the estimated number of
269         * freeable entries.
270         */
271        if (shrinker->nr > max_pass * 2)
272            shrinker->nr = max_pass * 2;
273
274        total_scan = shrinker->nr;
275        shrinker->nr = 0;
276
277        while (total_scan >= SHRINK_BATCH) {
278            long this_scan = SHRINK_BATCH;
279            int shrink_ret;
280            int nr_before;
281
282            nr_before = do_shrinker_shrink(shrinker, shrink, 0);
283            shrink_ret = do_shrinker_shrink(shrinker, shrink,
284                            this_scan);
285            if (shrink_ret == -1)
286                break;
287            if (shrink_ret < nr_before)
288                ret += nr_before - shrink_ret;
289            count_vm_events(SLABS_SCANNED, this_scan);
290            total_scan -= this_scan;
291
292            cond_resched();
293        }
294
295        shrinker->nr += total_scan;
296    }
297    up_read(&shrinker_rwsem);
298out:
299    cond_resched();
300    return ret;
301}
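
A hedged worked example of the proportional aging performed above, with purely
illustrative numbers:

/*
 * nr_pages_scanned = 128, lru_pages = 100000,
 * shrinker->seeks = DEFAULT_SEEKS (2), max_pass = 10000 objects:
 *
 *   delta = (4 * 128 / 2) * 10000 / (100000 + 1) ~= 25 objects
 *
 * 25 is below SHRINK_BATCH (128), so nothing is scanned this time; the
 * deficit is carried in shrinker->nr and converted into ->shrink() calls
 * of SHRINK_BATCH objects once enough pressure has accumulated.
 */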
302
303static void set_reclaim_mode(int priority, struct scan_control *sc,
304                   bool sync)
305{
306    reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
307
308    /*
309     * Initially assume we are entering either lumpy reclaim or
310     * reclaim/compaction. Depending on the order, we will either set the
311     * sync mode or just reclaim order-0 pages later.
312     */
313    if (COMPACTION_BUILD)
314        sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
315    else
316        sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
317
318    /*
319     * Avoid using lumpy reclaim or reclaim/compaction if possible by
320     * restricting when it is set to either costly allocations or when
321     * under memory pressure
322     */
323    if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
324        sc->reclaim_mode |= syncmode;
325    else if (sc->order && priority < DEF_PRIORITY - 2)
326        sc->reclaim_mode |= syncmode;
327    else
328        sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
329}
330
331static void reset_reclaim_mode(struct scan_control *sc)
332{
333    sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
334}
335
336static inline int is_page_cache_freeable(struct page *page)
337{
338    /*
339     * A freeable page cache page is referenced only by the caller
340     * that isolated the page, the page cache radix tree and
341     * optional buffer heads at page->private.
342     */
343    return page_count(page) - page_has_private(page) == 2;
344}
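
A worked example of the reference-count arithmetic above (illustrative): a
clean pagecache page isolated by reclaim is referenced by the page cache radix
tree and by the isolating caller, so page_count() == 2 with
page_has_private() == 0. With buffer heads attached, page_count() == 3 and
page_has_private() == 1, which still yields 3 - 1 == 2. Any extra reference
(get_user_pages(), a concurrent pagecache lookup, ...) pushes the result above
2 and the page is treated as busy.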
345
346static int may_write_to_queue(struct backing_dev_info *bdi,
347                  struct scan_control *sc)
348{
349    if (current->flags & PF_SWAPWRITE)
350        return 1;
351    if (!bdi_write_congested(bdi))
352        return 1;
353    if (bdi == current->backing_dev_info)
354        return 1;
355
356    /* lumpy reclaim for hugepages often needs a lot of writes */
357    if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
358        return 1;
359    return 0;
360}
361
362/*
363 * We detected a synchronous write error writing a page out. Probably
364 * -ENOSPC. We need to propagate that into the address_space for a subsequent
365 * fsync(), msync() or close().
366 *
367 * The tricky part is that after writepage we cannot touch the mapping: nothing
368 * prevents it from being freed up. But we have a ref on the page and once
369 * that page is locked, the mapping is pinned.
370 *
371 * We're allowed to run sleeping lock_page() here because we know the caller has
372 * __GFP_FS.
373 */
374static void handle_write_error(struct address_space *mapping,
375                struct page *page, int error)
376{
377    lock_page(page);
378    if (page_mapping(page) == mapping)
379        mapping_set_error(mapping, error);
380    unlock_page(page);
381}
382
383/* possible outcome of pageout() */
384typedef enum {
385    /* failed to write page out, page is locked */
386    PAGE_KEEP,
387    /* move page to the active list, page is locked */
388    PAGE_ACTIVATE,
389    /* page has been sent to the disk successfully, page is unlocked */
390    PAGE_SUCCESS,
391    /* page is clean and locked */
392    PAGE_CLEAN,
393} pageout_t;
394
395/*
396 * pageout is called by shrink_page_list() for each dirty page.
397 * Calls ->writepage().
398 */
399static pageout_t pageout(struct page *page, struct address_space *mapping,
400             struct scan_control *sc)
401{
402    /*
403     * If the page is dirty, only perform writeback if that write
404     * will be non-blocking, to prevent this allocation from being
405     * stalled by pagecache activity. But note that there may be
406     * stalls if we need to run get_block(). We could test
407     * PagePrivate for that.
408     *
409     * If this process is currently in __generic_file_aio_write() against
410     * this page's queue, we can perform writeback even if that
411     * will block.
412     *
413     * If the page is swapcache, write it back even if that would
414     * block, for some throttling. This happens by accident, because
415     * swap_backing_dev_info is bust: it doesn't reflect the
416     * congestion state of the swapdevs. Easy to fix, if needed.
417     */
418    if (!is_page_cache_freeable(page))
419        return PAGE_KEEP;
420    if (!mapping) {
421        /*
422         * Some data journaling orphaned pages can have
423         * page->mapping == NULL while being dirty with clean buffers.
424         */
425        if (page_has_private(page)) {
426            if (try_to_free_buffers(page)) {
427                ClearPageDirty(page);
428                printk("%s: orphaned page\n", __func__);
429                return PAGE_CLEAN;
430            }
431        }
432        return PAGE_KEEP;
433    }
434    if (mapping->a_ops->writepage == NULL)
435        return PAGE_ACTIVATE;
436    if (!may_write_to_queue(mapping->backing_dev_info, sc))
437        return PAGE_KEEP;
438
439    if (clear_page_dirty_for_io(page)) {
440        int res;
441        struct writeback_control wbc = {
442            .sync_mode = WB_SYNC_NONE,
443            .nr_to_write = SWAP_CLUSTER_MAX,
444            .range_start = 0,
445            .range_end = LLONG_MAX,
446            .for_reclaim = 1,
447        };
448
449        SetPageReclaim(page);
450        res = mapping->a_ops->writepage(page, &wbc);
451        if (res < 0)
452            handle_write_error(mapping, page, res);
453        if (res == AOP_WRITEPAGE_ACTIVATE) {
454            ClearPageReclaim(page);
455            return PAGE_ACTIVATE;
456        }
457
458        /*
459         * Wait on writeback if requested to. This happens when
460         * direct reclaiming a large contiguous area and the
461         * first attempt to free a range of pages fails.
462         */
463        if (PageWriteback(page) &&
464            (sc->reclaim_mode & RECLAIM_MODE_SYNC))
465            wait_on_page_writeback(page);
466
467        if (!PageWriteback(page)) {
468            /* synchronous write or broken a_ops? */
469            ClearPageReclaim(page);
470        }
471        trace_mm_vmscan_writepage(page,
472            trace_reclaim_flags(page, sc->reclaim_mode));
473        inc_zone_page_state(page, NR_VMSCAN_WRITE);
474        return PAGE_SUCCESS;
475    }
476
477    return PAGE_CLEAN;
478}
479
480/*
481 * Same as remove_mapping, but if the page is removed from the mapping, it
482 * gets returned with a refcount of 0.
483 */
484static int __remove_mapping(struct address_space *mapping, struct page *page)
485{
486    BUG_ON(!PageLocked(page));
487    BUG_ON(mapping != page_mapping(page));
488
489    spin_lock_irq(&mapping->tree_lock);
490    /*
491     * The non-racy check for a busy page.
492     *
493     * Must be careful with the order of the tests. When someone has
494     * a ref to the page, it may be possible that they dirty it then
495     * drop the reference. So if PageDirty is tested before page_count
496     * here, then the following race may occur:
497     *
498     * get_user_pages(&page);
499     * [user mapping goes away]
500     * write_to(page);
501     * !PageDirty(page) [good]
502     * SetPageDirty(page);
503     * put_page(page);
504     * !page_count(page) [good, discard it]
505     *
506     * [oops, our write_to data is lost]
507     *
508     * Reversing the order of the tests ensures such a situation cannot
509     * escape unnoticed. The smp_rmb is needed to ensure the page->flags
510     * load is not satisfied before that of page->_count.
511     *
512     * Note that if SetPageDirty is always performed via set_page_dirty,
513     * and thus under tree_lock, then this ordering is not required.
514     */
515    if (!page_freeze_refs(page, 2))
516        goto cannot_free;
517    /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
518    if (unlikely(PageDirty(page))) {
519        page_unfreeze_refs(page, 2);
520        goto cannot_free;
521    }
522
523    if (PageSwapCache(page)) {
524        swp_entry_t swap = { .val = page_private(page) };
525        __delete_from_swap_cache(page);
526        spin_unlock_irq(&mapping->tree_lock);
527        swapcache_free(swap, page);
528    } else {
529        void (*freepage)(struct page *);
530
531        freepage = mapping->a_ops->freepage;
532
533        __delete_from_page_cache(page);
534        spin_unlock_irq(&mapping->tree_lock);
535        mem_cgroup_uncharge_cache_page(page);
536
537        if (freepage != NULL)
538            freepage(page);
539    }
540
541    return 1;
542
543cannot_free:
544    spin_unlock_irq(&mapping->tree_lock);
545    return 0;
546}
547
548/*
549 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
550 * someone else has a ref on the page, abort and return 0. If it was
551 * successfully detached, return 1. Assumes the caller has a single ref on
552 * this page.
553 */
554int remove_mapping(struct address_space *mapping, struct page *page)
555{
556    if (__remove_mapping(mapping, page)) {
557        /*
558         * Unfreezing the refcount with 1 rather than 2 effectively
559         * drops the pagecache ref for us without requiring another
560         * atomic operation.
561         */
562        page_unfreeze_refs(page, 1);
563        return 1;
564    }
565    return 0;
566}
567
568/**
569 * putback_lru_page - put previously isolated page onto appropriate LRU list
570 * @page: page to be put back to appropriate lru list
571 *
572 * Add previously isolated @page to appropriate LRU list.
573 * Page may still be unevictable for other reasons.
574 *
575 * lru_lock must not be held, interrupts must be enabled.
576 */
577void putback_lru_page(struct page *page)
578{
579    int lru;
580    int active = !!TestClearPageActive(page);
581    int was_unevictable = PageUnevictable(page);
582
583    VM_BUG_ON(PageLRU(page));
584
585redo:
586    ClearPageUnevictable(page);
587
588    if (page_evictable(page, NULL)) {
589        /*
590         * For evictable pages, we can use the cache.
591         * In event of a race, worst case is we end up with an
592         * unevictable page on [in]active list.
593         * We know how to handle that.
594         */
595        lru = active + page_lru_base_type(page);
596        lru_cache_add_lru(page, lru);
597    } else {
598        /*
599         * Put unevictable pages directly on zone's unevictable
600         * list.
601         */
602        lru = LRU_UNEVICTABLE;
603        add_page_to_unevictable_list(page);
604        /*
605         * When racing with an mlock clearing (page is
606         * unlocked), make sure that if the other thread does
607         * not observe our setting of PG_lru and fails
608         * isolation, we see PG_mlocked cleared below and move
609         * the page back to the evictable list.
610         *
611         * The other side is TestClearPageMlocked().
612         */
613        smp_mb();
614    }
615
616    /*
617     * The page's status can change while we move it among the LRU lists. If
618     * an evictable page ends up on the unevictable list, it will never be
619     * freed. To avoid that, check again after we have added it to the list.
620     */
621    if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
622        if (!isolate_lru_page(page)) {
623            put_page(page);
624            goto redo;
625        }
626        /* This means someone else dropped this page from LRU
627         * So, it will be freed or putback to LRU again. There is
628         * nothing to do here.
629         */
630    }
631
632    if (was_unevictable && lru != LRU_UNEVICTABLE)
633        count_vm_event(UNEVICTABLE_PGRESCUED);
634    else if (!was_unevictable && lru == LRU_UNEVICTABLE)
635        count_vm_event(UNEVICTABLE_PGCULLED);
636
637    put_page(page); /* drop ref from isolate */
638}
639
640enum page_references {
641    PAGEREF_RECLAIM,
642    PAGEREF_RECLAIM_CLEAN,
643    PAGEREF_KEEP,
644    PAGEREF_ACTIVATE,
645};
646
647static enum page_references page_check_references(struct page *page,
648                          struct scan_control *sc)
649{
650    int referenced_ptes, referenced_page;
651    unsigned long vm_flags;
652
653    referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
654    referenced_page = TestClearPageReferenced(page);
655
656    /* Lumpy reclaim - ignore references */
657    if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
658        return PAGEREF_RECLAIM;
659
660    /*
661     * Mlock lost the isolation race with us. Let try_to_unmap()
662     * move the page to the unevictable list.
663     */
664    if (vm_flags & VM_LOCKED)
665        return PAGEREF_RECLAIM;
666
667    if (referenced_ptes) {
668        if (PageAnon(page))
669            return PAGEREF_ACTIVATE;
670        /*
671         * All mapped pages start out with page table
672         * references from the instantiating fault, so we need
673         * to look twice if a mapped file page is used more
674         * than once.
675         *
676         * Mark it and spare it for another trip around the
677         * inactive list. Another page table reference will
678         * lead to its activation.
679         *
680         * Note: the mark is set for activated pages as well
681         * so that recently deactivated but used pages are
682         * quickly recovered.
683         */
684        SetPageReferenced(page);
685
686        if (referenced_page)
687            return PAGEREF_ACTIVATE;
688
689        return PAGEREF_KEEP;
690    }
691
692    /* Reclaim if clean, defer dirty pages to writeback */
693    if (referenced_page && !PageSwapBacked(page))
694        return PAGEREF_RECLAIM_CLEAN;
695
696    return PAGEREF_RECLAIM;
697}
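
A condensed summary of the decisions above, in the order they are taken
(illustrative, mirroring the code):

/*
 *   lumpy reclaim                                     -> PAGEREF_RECLAIM
 *   racing VM_LOCKED vma                              -> PAGEREF_RECLAIM (culled by try_to_unmap)
 *   pte-referenced anon page                          -> PAGEREF_ACTIVATE
 *   pte-referenced file page, PG_referenced was set   -> PAGEREF_ACTIVATE
 *   pte-referenced file page, first reference         -> PAGEREF_KEEP (PG_referenced now set)
 *   no pte references, PG_referenced, not swap-backed -> PAGEREF_RECLAIM_CLEAN
 *   everything else                                   -> PAGEREF_RECLAIM
 */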
698
699static noinline_for_stack void free_page_list(struct list_head *free_pages)
700{
701    struct pagevec freed_pvec;
702    struct page *page, *tmp;
703
704    pagevec_init(&freed_pvec, 1);
705
706    list_for_each_entry_safe(page, tmp, free_pages, lru) {
707        list_del(&page->lru);
708        if (!pagevec_add(&freed_pvec, page)) {
709            __pagevec_free(&freed_pvec);
710            pagevec_reinit(&freed_pvec);
711        }
712    }
713
714    pagevec_free(&freed_pvec);
715}
716
717/*
718 * shrink_page_list() returns the number of reclaimed pages
719 */
720static unsigned long shrink_page_list(struct list_head *page_list,
721                      struct zone *zone,
722                      struct scan_control *sc)
723{
724    LIST_HEAD(ret_pages);
725    LIST_HEAD(free_pages);
726    int pgactivate = 0;
727    unsigned long nr_dirty = 0;
728    unsigned long nr_congested = 0;
729    unsigned long nr_reclaimed = 0;
730
731    cond_resched();
732
733    while (!list_empty(page_list)) {
734        enum page_references references;
735        struct address_space *mapping;
736        struct page *page;
737        int may_enter_fs;
738
739        cond_resched();
740
741        page = lru_to_page(page_list);
742        list_del(&page->lru);
743
744        if (!trylock_page(page))
745            goto keep;
746
747        VM_BUG_ON(PageActive(page));
748        VM_BUG_ON(page_zone(page) != zone);
749
750        sc->nr_scanned++;
751
752        if (unlikely(!page_evictable(page, NULL)))
753            goto cull_mlocked;
754
755        if (!sc->may_unmap && page_mapped(page))
756            goto keep_locked;
757
758        /* Double the slab pressure for mapped and swapcache pages */
759        if (page_mapped(page) || PageSwapCache(page))
760            sc->nr_scanned++;
761
762        may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
763            (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
764
765        if (PageWriteback(page)) {
766            /*
767             * Synchronous reclaim is performed in two passes,
768             * first an asynchronous pass over the list to
769             * start parallel writeback, and a second synchronous
770             * pass to wait for the IO to complete. Wait here
771             * for any page for which writeback has already
772             * started.
773             */
774            if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
775                may_enter_fs)
776                wait_on_page_writeback(page);
777            else {
778                unlock_page(page);
779                goto keep_lumpy;
780            }
781        }
782
783        references = page_check_references(page, sc);
784        switch (references) {
785        case PAGEREF_ACTIVATE:
786            goto activate_locked;
787        case PAGEREF_KEEP:
788            goto keep_locked;
789        case PAGEREF_RECLAIM:
790        case PAGEREF_RECLAIM_CLEAN:
791            ; /* try to reclaim the page below */
792        }
793
794        /*
795         * Anonymous process memory has backing store?
796         * Try to allocate it some swap space here.
797         */
798        if (PageAnon(page) && !PageSwapCache(page)) {
799            if (!(sc->gfp_mask & __GFP_IO))
800                goto keep_locked;
801            if (!add_to_swap(page))
802                goto activate_locked;
803            may_enter_fs = 1;
804        }
805
806        mapping = page_mapping(page);
807
808        /*
809         * The page is mapped into the page tables of one or more
810         * processes. Try to unmap it here.
811         */
812        if (page_mapped(page) && mapping) {
813            switch (try_to_unmap(page, TTU_UNMAP)) {
814            case SWAP_FAIL:
815                goto activate_locked;
816            case SWAP_AGAIN:
817                goto keep_locked;
818            case SWAP_MLOCK:
819                goto cull_mlocked;
820            case SWAP_SUCCESS:
821                ; /* try to free the page below */
822            }
823        }
824
825        if (PageDirty(page)) {
826            nr_dirty++;
827
828            if (references == PAGEREF_RECLAIM_CLEAN)
829                goto keep_locked;
830            if (!may_enter_fs)
831                goto keep_locked;
832            if (!sc->may_writepage)
833                goto keep_locked;
834
835            /* Page is dirty, try to write it out here */
836            switch (pageout(page, mapping, sc)) {
837            case PAGE_KEEP:
838                nr_congested++;
839                goto keep_locked;
840            case PAGE_ACTIVATE:
841                goto activate_locked;
842            case PAGE_SUCCESS:
843                if (PageWriteback(page))
844                    goto keep_lumpy;
845                if (PageDirty(page))
846                    goto keep;
847
848                /*
849                 * A synchronous write - probably a ramdisk. Go
850                 * ahead and try to reclaim the page.
851                 */
852                if (!trylock_page(page))
853                    goto keep;
854                if (PageDirty(page) || PageWriteback(page))
855                    goto keep_locked;
856                mapping = page_mapping(page);
857            case PAGE_CLEAN:
858                ; /* try to free the page below */
859            }
860        }
861
862        /*
863         * If the page has buffers, try to free the buffer mappings
864         * associated with this page. If we succeed we try to free
865         * the page as well.
866         *
867         * We do this even if the page is PageDirty().
868         * try_to_release_page() does not perform I/O, but it is
869         * possible for a page to have PageDirty set, but it is actually
870         * clean (all its buffers are clean). This happens if the
871         * buffers were written out directly, with submit_bh(). ext3
872         * will do this, as well as the blockdev mapping.
873         * try_to_release_page() will discover that cleanness and will
874         * drop the buffers and mark the page clean - it can be freed.
875         *
876         * Rarely, pages can have buffers and no ->mapping. These are
877         * the pages which were not successfully invalidated in
878         * truncate_complete_page(). We try to drop those buffers here
879         * and if that worked, and the page is no longer mapped into
880         * process address space (page_count == 1) it can be freed.
881         * Otherwise, leave the page on the LRU so it is swappable.
882         */
883        if (page_has_private(page)) {
884            if (!try_to_release_page(page, sc->gfp_mask))
885                goto activate_locked;
886            if (!mapping && page_count(page) == 1) {
887                unlock_page(page);
888                if (put_page_testzero(page))
889                    goto free_it;
890                else {
891                    /*
892                     * rare race with speculative reference.
893                     * the speculative reference will free
894                     * this page shortly, so we may
895                     * increment nr_reclaimed here (and
896                     * leave it off the LRU).
897                     */
898                    nr_reclaimed++;
899                    continue;
900                }
901            }
902        }
903
904        if (!mapping || !__remove_mapping(mapping, page))
905            goto keep_locked;
906
907        /*
908         * At this point, we have no other references and there is
909         * no way to pick any more up (removed from LRU, removed
910         * from pagecache). Can use non-atomic bitops now (and
911         * we obviously don't have to worry about waking up a process
912         * waiting on the page lock, because there are no references.
913         */
914        __clear_page_locked(page);
915free_it:
916        nr_reclaimed++;
917
918        /*
919         * Is there a need to periodically call free_page_list()? It would
920         * appear not, as the counts should be low.
921         */
922        list_add(&page->lru, &free_pages);
923        continue;
924
925cull_mlocked:
926        if (PageSwapCache(page))
927            try_to_free_swap(page);
928        unlock_page(page);
929        putback_lru_page(page);
930        reset_reclaim_mode(sc);
931        continue;
932
933activate_locked:
934        /* Not a candidate for swapping, so reclaim swap space. */
935        if (PageSwapCache(page) && vm_swap_full())
936            try_to_free_swap(page);
937        VM_BUG_ON(PageActive(page));
938        SetPageActive(page);
939        pgactivate++;
940keep_locked:
941        unlock_page(page);
942keep:
943        reset_reclaim_mode(sc);
944keep_lumpy:
945        list_add(&page->lru, &ret_pages);
946        VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
947    }
948
949    /*
950     * Tag a zone as congested if all the dirty pages encountered were
951     * backed by a congested BDI. In this case, reclaimers should just
952     * back off and wait for congestion to clear because further reclaim
953     * will encounter the same problem
954     */
955    if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
956        zone_set_flag(zone, ZONE_CONGESTED);
957
958    free_page_list(&free_pages);
959
960    list_splice(&ret_pages, page_list);
961    count_vm_events(PGACTIVATE, pgactivate);
962    return nr_reclaimed;
963}
964
965/*
966 * Attempt to remove the specified page from its LRU. Only take this page
967 * if it is of the appropriate PageActive status. Pages which are being
968 * freed elsewhere are also ignored.
969 *
970 * page: page to consider
971 * mode: one of the LRU isolation modes defined above
972 *
973 * returns 0 on success, -ve errno on failure.
974 */
975int __isolate_lru_page(struct page *page, int mode, int file)
976{
977    int ret = -EINVAL;
978
979    /* Only take pages on the LRU. */
980    if (!PageLRU(page))
981        return ret;
982
983    /*
984     * When checking the active state, we need to be sure we are
985     * dealing with comparable boolean values. Take the logical not
986     * of each.
987     */
988    if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
989        return ret;
990
991    if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
992        return ret;
993
994    /*
995     * When this function is being called for lumpy reclaim, we
996     * initially look into all LRU pages, active, inactive and
997     * unevictable; only give shrink_page_list evictable pages.
998     */
999    if (PageUnevictable(page))
1000        return ret;
1001
1002    ret = -EBUSY;
1003
1004    if (likely(get_page_unless_zero(page))) {
1005        /*
1006         * Be careful not to clear PageLRU until after we're
1007         * sure the page is not being freed elsewhere -- the
1008         * page release code relies on it.
1009         */
1010        ClearPageLRU(page);
1011        ret = 0;
1012    }
1013
1014    return ret;
1015}
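
How the "!PageActive(page) != !mode" test above behaves for the three
isolation modes (a sketch; the ISOLATE_* constants are defined elsewhere):

/*
 *   mode == ISOLATE_INACTIVE: only !PageActive pages are taken
 *   mode == ISOLATE_ACTIVE:   only PageActive pages are taken
 *   mode == ISOLATE_BOTH:     the active state is not checked (lumpy reclaim)
 * The double negation turns PageActive() and mode into comparable booleans.
 */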
1016
1017/*
1018 * zone->lru_lock is heavily contended. Some of the functions that
1019 * shrink the lists perform better by taking out a batch of pages
1020 * and working on them outside the LRU lock.
1021 *
1022 * For pagecache intensive workloads, this function is the hottest
1023 * spot in the kernel (apart from copy_*_user functions).
1024 *
1025 * Appropriate locks must be held before calling this function.
1026 *
1027 * @nr_to_scan: The number of pages to look through on the list.
1028 * @src: The LRU list to pull pages off.
1029 * @dst: The temp list to put pages on to.
1030 * @scanned: The number of pages that were scanned.
1031 * @order: The caller's attempted allocation order
1032 * @mode: One of the LRU isolation modes
1033 * @file: True [1] if isolating file [!anon] pages
1034 *
1035 * returns how many pages were moved onto *@dst.
1036 */
1037static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1038        struct list_head *src, struct list_head *dst,
1039        unsigned long *scanned, int order, int mode, int file)
1040{
1041    unsigned long nr_taken = 0;
1042    unsigned long nr_lumpy_taken = 0;
1043    unsigned long nr_lumpy_dirty = 0;
1044    unsigned long nr_lumpy_failed = 0;
1045    unsigned long scan;
1046
1047    for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1048        struct page *page;
1049        unsigned long pfn;
1050        unsigned long end_pfn;
1051        unsigned long page_pfn;
1052        int zone_id;
1053
1054        page = lru_to_page(src);
1055        prefetchw_prev_lru_page(page, src, flags);
1056
1057        VM_BUG_ON(!PageLRU(page));
1058
1059        switch (__isolate_lru_page(page, mode, file)) {
1060        case 0:
1061            list_move(&page->lru, dst);
1062            mem_cgroup_del_lru(page);
1063            nr_taken += hpage_nr_pages(page);
1064            break;
1065
1066        case -EBUSY:
1067            /* else it is being freed elsewhere */
1068            list_move(&page->lru, src);
1069            mem_cgroup_rotate_lru_list(page, page_lru(page));
1070            continue;
1071
1072        default:
1073            BUG();
1074        }
1075
1076        if (!order)
1077            continue;
1078
1079        /*
1080         * Attempt to take all pages in the order aligned region
1081         * surrounding the tag page. Only take those pages of
1082         * the same active state as that tag page. We may safely
1083         * round the target page pfn down to the requested order
1084         * as the mem_map is guaranteed valid out to MAX_ORDER;
1085         * if that page is in a different zone we will detect
1086         * it from its zone id and abort this block scan.
1087         */
1088        zone_id = page_zone_id(page);
1089        page_pfn = page_to_pfn(page);
1090        pfn = page_pfn & ~((1 << order) - 1);
1091        end_pfn = pfn + (1 << order);
1092        for (; pfn < end_pfn; pfn++) {
1093            struct page *cursor_page;
1094
1095            /* The target page is in the block, ignore it. */
1096            if (unlikely(pfn == page_pfn))
1097                continue;
1098
1099            /* Avoid holes within the zone. */
1100            if (unlikely(!pfn_valid_within(pfn)))
1101                break;
1102
1103            cursor_page = pfn_to_page(pfn);
1104
1105            /* Check that we have not crossed a zone boundary. */
1106            if (unlikely(page_zone_id(cursor_page) != zone_id))
1107                break;
1108
1109            /*
1110             * If we don't have enough swap space, reclaiming
1111             * anon pages which don't already have a swap slot is
1112             * pointless.
1113             */
1114            if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
1115                !PageSwapCache(cursor_page))
1116                break;
1117
1118            if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1119                list_move(&cursor_page->lru, dst);
1120                mem_cgroup_del_lru(cursor_page);
1121                nr_taken += hpage_nr_pages(page);
1122                nr_lumpy_taken++;
1123                if (PageDirty(cursor_page))
1124                    nr_lumpy_dirty++;
1125                scan++;
1126            } else {
1127                /*
1128                 * Check if the page is freed already.
1129                 *
1130                 * We can't use page_count() as that
1131                 * requires compound_head and we don't
1132                 * have a pin on the page here. If a
1133                 * page is tail, we may or may not
1134                 * have isolated the head, so assume
1135                 * it's not free, it'd be tricky to
1136                 * track the head status without a
1137                 * page pin.
1138                 */
1139                if (!PageTail(cursor_page) &&
1140                    !atomic_read(&cursor_page->_count))
1141                    continue;
1142                break;
1143            }
1144        }
1145
1146        /* If we break out of the loop above, lumpy reclaim failed */
1147        if (pfn < end_pfn)
1148            nr_lumpy_failed++;
1149    }
1150
1151    *scanned = scan;
1152
1153    trace_mm_vmscan_lru_isolate(order,
1154            nr_to_scan, scan,
1155            nr_taken,
1156            nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1157            mode);
1158    return nr_taken;
1159}
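
A worked example of the lumpy pfn arithmetic used above (numbers illustrative):

/*
 * order = 4, tag page at page_pfn = 0x12345:
 *   pfn     = 0x12345 & ~((1 << 4) - 1) = 0x12340
 *   end_pfn = 0x12340 + (1 << 4)        = 0x12350
 * so the naturally aligned 16-page block 0x12340..0x1234f is walked and
 * every page in it that passes __isolate_lru_page() is taken as well.
 */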
1160
1161static unsigned long isolate_pages_global(unsigned long nr,
1162                    struct list_head *dst,
1163                    unsigned long *scanned, int order,
1164                    int mode, struct zone *z,
1165                    int active, int file)
1166{
1167    int lru = LRU_BASE;
1168    if (active)
1169        lru += LRU_ACTIVE;
1170    if (file)
1171        lru += LRU_FILE;
1172    return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
1173                                mode, file);
1174}
1175
1176/*
1177 * clear_active_flags() is a helper for shrink_active_list(), clearing
1178 * any active bits from the pages in the list.
1179 */
1180static unsigned long clear_active_flags(struct list_head *page_list,
1181                    unsigned int *count)
1182{
1183    int nr_active = 0;
1184    int lru;
1185    struct page *page;
1186
1187    list_for_each_entry(page, page_list, lru) {
1188        int numpages = hpage_nr_pages(page);
1189        lru = page_lru_base_type(page);
1190        if (PageActive(page)) {
1191            lru += LRU_ACTIVE;
1192            ClearPageActive(page);
1193            nr_active += numpages;
1194        }
1195        if (count)
1196            count[lru] += numpages;
1197    }
1198
1199    return nr_active;
1200}
1201
1202/**
1203 * isolate_lru_page - tries to isolate a page from its LRU list
1204 * @page: page to isolate from its LRU list
1205 *
1206 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1207 * vmstat statistic corresponding to whatever LRU list the page was on.
1208 *
1209 * Returns 0 if the page was removed from an LRU list.
1210 * Returns -EBUSY if the page was not on an LRU list.
1211 *
1212 * The returned page will have PageLRU() cleared. If it was found on
1213 * the active list, it will have PageActive set. If it was found on
1214 * the unevictable list, it will have the PageUnevictable bit set. That flag
1215 * may need to be cleared by the caller before letting the page go.
1216 *
1217 * The vmstat statistic corresponding to the list on which the page was
1218 * found will be decremented.
1219 *
1220 * Restrictions:
1221 * (1) Must be called with an elevated refcount on the page. This is a
1222 * fundamental difference from isolate_lru_pages (which is called
1223 * without a stable reference).
1224 * (2) the lru_lock must not be held.
1225 * (3) interrupts must be enabled.
1226 */
1227int isolate_lru_page(struct page *page)
1228{
1229    int ret = -EBUSY;
1230
1231    VM_BUG_ON(!page_count(page));
1232
1233    if (PageLRU(page)) {
1234        struct zone *zone = page_zone(page);
1235
1236        spin_lock_irq(&zone->lru_lock);
1237        if (PageLRU(page)) {
1238            int lru = page_lru(page);
1239            ret = 0;
1240            get_page(page);
1241            ClearPageLRU(page);
1242
1243            del_page_from_lru_list(zone, page, lru);
1244        }
1245        spin_unlock_irq(&zone->lru_lock);
1246    }
1247    return ret;
1248}
1249
1250/*
1251 * Are there way too many processes in the direct reclaim path already?
1252 */
1253static int too_many_isolated(struct zone *zone, int file,
1254        struct scan_control *sc)
1255{
1256    unsigned long inactive, isolated;
1257
1258    if (current_is_kswapd())
1259        return 0;
1260
1261    if (!scanning_global_lru(sc))
1262        return 0;
1263
1264    if (file) {
1265        inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1266        isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1267    } else {
1268        inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1269        isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1270    }
1271
1272    return isolated > inactive;
1273}
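
Example (illustrative): with NR_INACTIVE_FILE at 1000 pages and
NR_ISOLATED_FILE at 1200, isolated > inactive, so direct reclaimers back off
in the congestion_wait() loop in shrink_inactive_list() until some of the
isolated pages have been put back or freed.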
1274
1275/*
1276 * TODO: Try merging with migrations version of putback_lru_pages
1277 */
1278static noinline_for_stack void
1279putback_lru_pages(struct zone *zone, struct scan_control *sc,
1280                unsigned long nr_anon, unsigned long nr_file,
1281                struct list_head *page_list)
1282{
1283    struct page *page;
1284    struct pagevec pvec;
1285    struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1286
1287    pagevec_init(&pvec, 1);
1288
1289    /*
1290     * Put back any unfreeable pages.
1291     */
1292    spin_lock(&zone->lru_lock);
1293    while (!list_empty(page_list)) {
1294        int lru;
1295        page = lru_to_page(page_list);
1296        VM_BUG_ON(PageLRU(page));
1297        list_del(&page->lru);
1298        if (unlikely(!page_evictable(page, NULL))) {
1299            spin_unlock_irq(&zone->lru_lock);
1300            putback_lru_page(page);
1301            spin_lock_irq(&zone->lru_lock);
1302            continue;
1303        }
1304        SetPageLRU(page);
1305        lru = page_lru(page);
1306        add_page_to_lru_list(zone, page, lru);
1307        if (is_active_lru(lru)) {
1308            int file = is_file_lru(lru);
1309            int numpages = hpage_nr_pages(page);
1310            reclaim_stat->recent_rotated[file] += numpages;
1311        }
1312        if (!pagevec_add(&pvec, page)) {
1313            spin_unlock_irq(&zone->lru_lock);
1314            __pagevec_release(&pvec);
1315            spin_lock_irq(&zone->lru_lock);
1316        }
1317    }
1318    __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1319    __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1320
1321    spin_unlock_irq(&zone->lru_lock);
1322    pagevec_release(&pvec);
1323}
1324
1325static noinline_for_stack void update_isolated_counts(struct zone *zone,
1326                    struct scan_control *sc,
1327                    unsigned long *nr_anon,
1328                    unsigned long *nr_file,
1329                    struct list_head *isolated_list)
1330{
1331    unsigned long nr_active;
1332    unsigned int count[NR_LRU_LISTS] = { 0, };
1333    struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1334
1335    nr_active = clear_active_flags(isolated_list, count);
1336    __count_vm_events(PGDEACTIVATE, nr_active);
1337
1338    __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1339                  -count[LRU_ACTIVE_FILE]);
1340    __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1341                  -count[LRU_INACTIVE_FILE]);
1342    __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1343                  -count[LRU_ACTIVE_ANON]);
1344    __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1345                  -count[LRU_INACTIVE_ANON]);
1346
1347    *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1348    *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1349    __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1350    __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1351
1352    reclaim_stat->recent_scanned[0] += *nr_anon;
1353    reclaim_stat->recent_scanned[1] += *nr_file;
1354}
1355
1356/*
1357 * Returns true if the caller should wait to clean dirty/writeback pages.
1358 *
1359 * If we are direct reclaiming for contiguous pages and we do not reclaim
1360 * everything in the list, try again and wait for writeback IO to complete.
1361 * This will stall high-order allocations noticeably. Only do that when really
1362 * need to free the pages under high memory pressure.
1363 */
1364static inline bool should_reclaim_stall(unsigned long nr_taken,
1365                    unsigned long nr_freed,
1366                    int priority,
1367                    struct scan_control *sc)
1368{
1369    int lumpy_stall_priority;
1370
1371    /* kswapd should not stall on sync IO */
1372    if (current_is_kswapd())
1373        return false;
1374
1375    /* Only stall on lumpy reclaim */
1376    if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
1377        return false;
1378
1379    /* If we have reclaimed everything on the isolated list, no stall */
1380    if (nr_freed == nr_taken)
1381        return false;
1382
1383    /*
1384     * For high-order allocations, there are two stall thresholds.
1385     * High-cost allocations stall immediately whereas lower
1386     * order allocations such as stacks require the scanning
1387     * priority to be much higher before stalling.
1388     */
1389    if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1390        lumpy_stall_priority = DEF_PRIORITY;
1391    else
1392        lumpy_stall_priority = DEF_PRIORITY / 3;
1393
1394    return priority <= lumpy_stall_priority;
1395}
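
With DEF_PRIORITY == 12 and PAGE_ALLOC_COSTLY_ORDER == 3 (their usual values),
the thresholds above work out as follows (illustrative): an order-4 lumpy
reclaim that failed to free everything it isolated stalls at every priority,
while an order-2 request only stalls once the scan priority has dropped to
DEF_PRIORITY / 3 == 4 or below.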
1396
1397/*
1398 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1399 * of reclaimed pages
1400 */
1401static noinline_for_stack unsigned long
1402shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
1403            struct scan_control *sc, int priority, int file)
1404{
1405    LIST_HEAD(page_list);
1406    unsigned long nr_scanned;
1407    unsigned long nr_reclaimed = 0;
1408    unsigned long nr_taken;
1409    unsigned long nr_anon;
1410    unsigned long nr_file;
1411
1412    while (unlikely(too_many_isolated(zone, file, sc))) {
1413        congestion_wait(BLK_RW_ASYNC, HZ/10);
1414
1415        /* We are about to die and free our memory. Return now. */
1416        if (fatal_signal_pending(current))
1417            return SWAP_CLUSTER_MAX;
1418    }
1419
1420    set_reclaim_mode(priority, sc, false);
1421    lru_add_drain();
1422    spin_lock_irq(&zone->lru_lock);
1423
1424    if (scanning_global_lru(sc)) {
1425        nr_taken = isolate_pages_global(nr_to_scan,
1426            &page_list, &nr_scanned, sc->order,
1427            sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
1428                    ISOLATE_BOTH : ISOLATE_INACTIVE,
1429            zone, 0, file);
1430        zone->pages_scanned += nr_scanned;
1431        if (current_is_kswapd())
1432            __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1433                           nr_scanned);
1434        else
1435            __count_zone_vm_events(PGSCAN_DIRECT, zone,
1436                           nr_scanned);
1437    } else {
1438        nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1439            &page_list, &nr_scanned, sc->order,
1440            sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
1441                    ISOLATE_BOTH : ISOLATE_INACTIVE,
1442            zone, sc->mem_cgroup,
1443            0, file);
1444        /*
1445         * mem_cgroup_isolate_pages() keeps track of
1446         * scanned pages on its own.
1447         */
1448    }
1449
1450    if (nr_taken == 0) {
1451        spin_unlock_irq(&zone->lru_lock);
1452        return 0;
1453    }
1454
1455    update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1456
1457    spin_unlock_irq(&zone->lru_lock);
1458
1459    nr_reclaimed = shrink_page_list(&page_list, zone, sc);
1460
1461    /* Check if we should synchronously wait for writeback */
1462    if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1463        set_reclaim_mode(priority, sc, true);
1464        nr_reclaimed += shrink_page_list(&page_list, zone, sc);
1465    }
1466
1467    local_irq_disable();
1468    if (current_is_kswapd())
1469        __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1470    __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1471
1472    putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
1473
1474    trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1475        zone_idx(zone),
1476        nr_scanned, nr_reclaimed,
1477        priority,
1478        trace_shrink_flags(file, sc->reclaim_mode));
1479    return nr_reclaimed;
1480}
1481
1482/*
1483 * This moves pages from the active list to the inactive list.
1484 *
1485 * We move them the other way if the page is referenced by one or more
1486 * processes, from rmap.
1487 *
1488 * If the pages are mostly unmapped, the processing is fast and it is
1489 * appropriate to hold zone->lru_lock across the whole operation. But if
1490 * the pages are mapped, the processing is slow (page_referenced()) so we
1491 * should drop zone->lru_lock around each page. It's impossible to balance
1492 * this, so instead we remove the pages from the LRU while processing them.
1493 * It is safe to rely on PG_active against the non-LRU pages in here because
1494 * nobody will play with that bit on a non-LRU page.
1495 *
1496 * The downside is that we have to touch page->_count against each page.
1497 * But we had to alter page->flags anyway.
1498 */
1499
1500static void move_active_pages_to_lru(struct zone *zone,
1501                     struct list_head *list,
1502                     enum lru_list lru)
1503{
1504    unsigned long pgmoved = 0;
1505    struct pagevec pvec;
1506    struct page *page;
1507
1508    pagevec_init(&pvec, 1);
1509
1510    while (!list_empty(list)) {
1511        page = lru_to_page(list);
1512
1513        VM_BUG_ON(PageLRU(page));
1514        SetPageLRU(page);
1515
1516        list_move(&page->lru, &zone->lru[lru].list);
1517        mem_cgroup_add_lru_list(page, lru);
1518        pgmoved += hpage_nr_pages(page);
1519
1520        if (!pagevec_add(&pvec, page) || list_empty(list)) {
1521            spin_unlock_irq(&zone->lru_lock);
1522            if (buffer_heads_over_limit)
1523                pagevec_strip(&pvec);
1524            __pagevec_release(&pvec);
1525            spin_lock_irq(&zone->lru_lock);
1526        }
1527    }
1528    __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1529    if (!is_active_lru(lru))
1530        __count_vm_events(PGDEACTIVATE, pgmoved);
1531}
1532
1533static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1534            struct scan_control *sc, int priority, int file)
1535{
1536    unsigned long nr_taken;
1537    unsigned long pgscanned;
1538    unsigned long vm_flags;
1539    LIST_HEAD(l_hold); /* The pages which were snipped off */
1540    LIST_HEAD(l_active);
1541    LIST_HEAD(l_inactive);
1542    struct page *page;
1543    struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1544    unsigned long nr_rotated = 0;
1545
1546    lru_add_drain();
1547    spin_lock_irq(&zone->lru_lock);
1548    if (scanning_global_lru(sc)) {
1549        nr_taken = isolate_pages_global(nr_pages, &l_hold,
1550                        &pgscanned, sc->order,
1551                        ISOLATE_ACTIVE, zone,
1552                        1, file);
1553        zone->pages_scanned += pgscanned;
1554    } else {
1555        nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
1556                        &pgscanned, sc->order,
1557                        ISOLATE_ACTIVE, zone,
1558                        sc->mem_cgroup, 1, file);
1559        /*
1560         * mem_cgroup_isolate_pages() keeps track of
1561         * scanned pages on its own.
1562         */
1563    }
1564
1565    reclaim_stat->recent_scanned[file] += nr_taken;
1566
1567    __count_zone_vm_events(PGREFILL, zone, pgscanned);
1568    if (file)
1569        __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1570    else
1571        __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1572    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1573    spin_unlock_irq(&zone->lru_lock);
1574
1575    while (!list_empty(&l_hold)) {
1576        cond_resched();
1577        page = lru_to_page(&l_hold);
1578        list_del(&page->lru);
1579
1580        if (unlikely(!page_evictable(page, NULL))) {
1581            putback_lru_page(page);
1582            continue;
1583        }
1584
1585        if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1586            nr_rotated += hpage_nr_pages(page);
1587            /*
1588             * Identify referenced, file-backed active pages and
1589             * give them one more trip around the active list, so
1590             * that executable code gets a better chance to stay in
1591             * memory under moderate memory pressure. Anon pages
1592             * are not likely to be evicted by use-once streaming
1593             * IO, plus JVM can create lots of anon VM_EXEC pages,
1594             * so we ignore them here.
1595             */
1596            if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1597                list_add(&page->lru, &l_active);
1598                continue;
1599            }
1600        }
1601
1602        ClearPageActive(page); /* we are de-activating */
1603        list_add(&page->lru, &l_inactive);
1604    }
1605
1606    /*
1607     * Move pages back to the lru list.
1608     */
1609    spin_lock_irq(&zone->lru_lock);
1610    /*
1611     * Count referenced pages from currently used mappings as rotated,
1612     * even though only some of them are actually re-activated. This
1613     * helps balance scan pressure between file and anonymous pages in
1614     * get_scan_ratio.
1615     */
1616    reclaim_stat->recent_rotated[file] += nr_rotated;
1617
1618    move_active_pages_to_lru(zone, &l_active,
1619                        LRU_ACTIVE + file * LRU_FILE);
1620    move_active_pages_to_lru(zone, &l_inactive,
1621                        LRU_BASE + file * LRU_FILE);
1622    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1623    spin_unlock_irq(&zone->lru_lock);
1624}
1625
1626#ifdef CONFIG_SWAP
1627static int inactive_anon_is_low_global(struct zone *zone)
1628{
1629    unsigned long active, inactive;
1630
1631    active = zone_page_state(zone, NR_ACTIVE_ANON);
1632    inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1633
1634    if (inactive * zone->inactive_ratio < active)
1635        return 1;
1636
1637    return 0;
1638}
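/*
 * Illustrative worked example (editorial, numbers are hypothetical): assuming
 * a zone->inactive_ratio of 3, a zone with 900 active and 200 inactive anon
 * pages gives 200 * 3 = 600 < 900, so the helper above returns 1 and
 * shrink_active_list() will deactivate some anon pages. Once the inactive
 * list grows to a third of the active list (e.g. 300 * 3 >= 900), it returns
 * 0 and deactivation stops.
 */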
1639
1640/**
1641 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1642 * @zone: zone to check
1643 * @sc: scan control of this context
1644 *
1645 * Returns true if the zone does not have enough inactive anon pages,
1646 * meaning some active anon pages need to be deactivated.
1647 */
1648static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1649{
1650    int low;
1651
1652    /*
1653     * If we don't have swap space, anonymous page deactivation
1654     * is pointless.
1655     */
1656    if (!total_swap_pages)
1657        return 0;
1658
1659    if (scanning_global_lru(sc))
1660        low = inactive_anon_is_low_global(zone);
1661    else
1662        low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1663    return low;
1664}
1665#else
1666static inline int inactive_anon_is_low(struct zone *zone,
1667                    struct scan_control *sc)
1668{
1669    return 0;
1670}
1671#endif
1672
1673static int inactive_file_is_low_global(struct zone *zone)
1674{
1675    unsigned long active, inactive;
1676
1677    active = zone_page_state(zone, NR_ACTIVE_FILE);
1678    inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1679
1680    return (active > inactive);
1681}
1682
1683/**
1684 * inactive_file_is_low - check if file pages need to be deactivated
1685 * @zone: zone to check
1686 * @sc: scan control of this context
1687 *
1688 * When the system is doing streaming IO, memory pressure here
1689 * ensures that active file pages get deactivated, until more
1690 * than half of the file pages are on the inactive list.
1691 *
1692 * Once we get to that situation, protect the system's working
1693 * set from being evicted by disabling active file page aging.
1694 *
1695 * This uses a different ratio than the anonymous pages, because
1696 * the page cache uses a use-once replacement algorithm.
1697 */
1698static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1699{
1700    int low;
1701
1702    if (scanning_global_lru(sc))
1703        low = inactive_file_is_low_global(zone);
1704    else
1705        low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1706    return low;
1707}
1708
1709static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1710                int file)
1711{
1712    if (file)
1713        return inactive_file_is_low(zone, sc);
1714    else
1715        return inactive_anon_is_low(zone, sc);
1716}
1717
1718static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1719    struct zone *zone, struct scan_control *sc, int priority)
1720{
1721    int file = is_file_lru(lru);
1722
1723    if (is_active_lru(lru)) {
1724        if (inactive_list_is_low(zone, sc, file))
1725            shrink_active_list(nr_to_scan, zone, sc, priority, file);
1726        return 0;
1727    }
1728
1729    return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1730}
1731
1732/*
1733 * Determine how aggressively the anon and file LRU lists should be
1734 * scanned. The relative value of each set of LRU lists is determined
1735 * by looking at the fraction of the pages scanned we did rotate back
1736 * by looking at the fraction of the pages scanned that we rotated back
1737 * onto the active list instead of evicting.
1738 * nr[0] = anon pages to scan; nr[1] = file pages to scan
1739 */
1740static void get_scan_count(struct zone *zone, struct scan_control *sc,
1741                    unsigned long *nr, int priority)
1742{
1743    unsigned long anon, file, free;
1744    unsigned long anon_prio, file_prio;
1745    unsigned long ap, fp;
1746    struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1747    u64 fraction[2], denominator;
1748    enum lru_list l;
1749    int noswap = 0;
1750    int force_scan = 0;
1751
1752
1753    anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1754        zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1755    file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1756        zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1757
1758    if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
1759        /* kswapd does zone balancing and needs to scan this zone */
1760        if (scanning_global_lru(sc) && current_is_kswapd())
1761            force_scan = 1;
1762        /* memcg may have a small limit and needs to avoid priority drops */
1763        if (!scanning_global_lru(sc))
1764            force_scan = 1;
1765    }
1766
1767    /* If we have no swap space, do not bother scanning anon pages. */
1768    if (!sc->may_swap || (nr_swap_pages <= 0)) {
1769        noswap = 1;
1770        fraction[0] = 0;
1771        fraction[1] = 1;
1772        denominator = 1;
1773        goto out;
1774    }
1775
1776    if (scanning_global_lru(sc)) {
1777        free = zone_page_state(zone, NR_FREE_PAGES);
1778        /* If we have very few page cache pages,
1779           force-scan anon pages. */
1780        if (unlikely(file + free <= high_wmark_pages(zone))) {
1781            fraction[0] = 1;
1782            fraction[1] = 0;
1783            denominator = 1;
1784            goto out;
1785        }
1786    }
1787
1788    /*
1789     * With swappiness at 100, anonymous and file have the same priority.
1790     * This scanning priority is essentially the inverse of IO cost.
1791     */
1792    anon_prio = sc->swappiness;
1793    file_prio = 200 - sc->swappiness;
1794
1795    /*
1796     * OK, so we have swap space and a fair amount of page cache
1797     * pages. We use the recently rotated / recently scanned
1798     * ratios to determine how valuable each cache is.
1799     *
1800     * Because workloads change over time (and to avoid overflow)
1801     * we keep these statistics as a floating average, which ends
1802     * up weighing recent references more than old ones.
1803     *
1804     * anon in [0], file in [1]
1805     */
1806    spin_lock_irq(&zone->lru_lock);
1807    if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1808        reclaim_stat->recent_scanned[0] /= 2;
1809        reclaim_stat->recent_rotated[0] /= 2;
1810    }
1811
1812    if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1813        reclaim_stat->recent_scanned[1] /= 2;
1814        reclaim_stat->recent_rotated[1] /= 2;
1815    }
1816
1817    /*
1818     * The amount of pressure on anon vs file pages is inversely
1819     * proportional to the fraction of recently scanned pages on
1820     * each list that were recently referenced and in active use.
1821     */
1822    ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1823    ap /= reclaim_stat->recent_rotated[0] + 1;
1824
1825    fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1826    fp /= reclaim_stat->recent_rotated[1] + 1;
1827    spin_unlock_irq(&zone->lru_lock);
1828
1829    fraction[0] = ap;
1830    fraction[1] = fp;
1831    denominator = ap + fp + 1;
1832out:
1833    for_each_evictable_lru(l) {
1834        int file = is_file_lru(l);
1835        unsigned long scan;
1836
1837        scan = zone_nr_lru_pages(zone, sc, l);
1838        if (priority || noswap) {
1839            scan >>= priority;
1840            scan = div64_u64(scan * fraction[file], denominator);
1841        }
1842
1843        /*
1844         * If the zone or memcg is small, nr[l] can be 0. That means
1845         * nothing is scanned at this priority and the priority drops.
1846         * For global direct reclaim this tends not to be a problem,
1847         * since we can visit the next zone. But global kswapd must
1848         * scan something for zone balancing, and with memcg a priority
1849         * drop can cause large latency. So it is better to force a
1850         * small scan in those cases; see force_scan above.
1851         */
1852        if (!scan && force_scan) {
1853            if (file)
1854                scan = SWAP_CLUSTER_MAX;
1855            else if (!noswap)
1856                scan = SWAP_CLUSTER_MAX;
1857        }
1858        nr[l] = scan;
1859    }
1860}
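/*
 * Worked example (editorial, all numbers hypothetical): with the default
 * swappiness of 60, anon_prio = 60 and file_prio = 140. If the recent
 * statistics are recent_scanned[0] = 1000, recent_rotated[0] = 500 for anon
 * and recent_scanned[1] = 1000, recent_rotated[1] = 100 for file, then
 *
 *	ap = 61 * 1001 / 501  ~= 121
 *	fp = 141 * 1001 / 101 ~= 1397
 *
 * so file pages receive roughly 1397 / (121 + 1397 + 1) ~= 92% of the scan
 * pressure. For an inactive file list of 100,000 pages at priority 12 the
 * target becomes (100000 >> 12) * 1397 / 1519 ~= 22 pages for this pass.
 */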
1861
1862/*
1863 * Reclaim/compaction depends on a sufficient number of pages being freed. To
1864 * avoid disrupting the system, a small number of order-0 pages continue to be
1865 * rotated and reclaimed in the normal fashion. However, by the time we get
1866 * back to the allocator and call try_to_compact_pages(), we ensure that
1867 * there are enough free pages for compaction to be likely to succeed.
1868 */
1869static inline bool should_continue_reclaim(struct zone *zone,
1870                    unsigned long nr_reclaimed,
1871                    unsigned long nr_scanned,
1872                    struct scan_control *sc)
1873{
1874    unsigned long pages_for_compaction;
1875    unsigned long inactive_lru_pages;
1876
1877    /* If not in reclaim/compaction mode, stop */
1878    if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
1879        return false;
1880
1881    /* Consider stopping depending on scan and reclaim activity */
1882    if (sc->gfp_mask & __GFP_REPEAT) {
1883        /*
1884         * For __GFP_REPEAT allocations, stop reclaiming if the
1885         * full LRU list has been scanned and we are still failing
1886         * to reclaim pages. This full LRU scan is potentially
1887         * expensive, but a __GFP_REPEAT caller really wants to succeed.
1888         */
1889        if (!nr_reclaimed && !nr_scanned)
1890            return false;
1891    } else {
1892        /*
1893         * For non-__GFP_REPEAT allocations which can presumably
1894         * fail without consequence, stop if we failed to reclaim
1895         * any pages from the last SWAP_CLUSTER_MAX number of
1896         * pages that were scanned. This returns to the caller
1897         * faster, at the risk that reclaim/compaction and the
1898         * resulting allocation attempt fail.
1899         */
1900        if (!nr_reclaimed)
1901            return false;
1902    }
1903
1904    /*
1905     * If we have not reclaimed enough pages for compaction and the
1906     * inactive lists are large enough, continue reclaiming
1907     */
1908    pages_for_compaction = (2UL << sc->order);
1909    inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
1910                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1911    if (sc->nr_reclaimed < pages_for_compaction &&
1912            inactive_lru_pages > pages_for_compaction)
1913        return true;
1914
1915    /* If compaction would go ahead or the allocation would succeed, stop */
1916    switch (compaction_suitable(zone, sc->order)) {
1917    case COMPACT_PARTIAL:
1918    case COMPACT_CONTINUE:
1919        return false;
1920    default:
1921        return true;
1922    }
1923}
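/*
 * Illustrative example (editorial): for a hypothetical order-9 request (a 2MB
 * huge page with 4KB base pages), pages_for_compaction = 2UL << 9 = 1024,
 * i.e. twice the size of the allocation. Reclaim keeps going while fewer than
 * 1024 pages have been reclaimed and the inactive lists still hold more than
 * 1024 pages, unless compaction_suitable() already reports that compaction
 * could run or the allocation would succeed.
 */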
1924
1925/*
1926 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1927 */
1928static void shrink_zone(int priority, struct zone *zone,
1929                struct scan_control *sc)
1930{
1931    unsigned long nr[NR_LRU_LISTS];
1932    unsigned long nr_to_scan;
1933    enum lru_list l;
1934    unsigned long nr_reclaimed, nr_scanned;
1935    unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1936
1937restart:
1938    nr_reclaimed = 0;
1939    nr_scanned = sc->nr_scanned;
1940    get_scan_count(zone, sc, nr, priority);
1941
1942    while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1943                    nr[LRU_INACTIVE_FILE]) {
1944        for_each_evictable_lru(l) {
1945            if (nr[l]) {
1946                nr_to_scan = min_t(unsigned long,
1947                           nr[l], SWAP_CLUSTER_MAX);
1948                nr[l] -= nr_to_scan;
1949
1950                nr_reclaimed += shrink_list(l, nr_to_scan,
1951                                zone, sc, priority);
1952            }
1953        }
1954        /*
1955         * On large memory systems, scan >> priority can become
1956         * really large. This is fine for the starting priority;
1957         * we want to put equal scanning pressure on each zone.
1958         * However, if the VM has a harder time freeing pages,
1959         * with multiple processes reclaiming pages, the total
1960         * freeing target can get unreasonably large.
1961         */
1962        if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
1963            break;
1964    }
1965    sc->nr_reclaimed += nr_reclaimed;
1966
1967    /*
1968     * Even if we did not try to evict anon pages at all, we want to
1969     * rebalance the anon lru active/inactive ratio.
1970     */
1971    if (inactive_anon_is_low(zone, sc))
1972        shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1973
1974    /* reclaim/compaction might need reclaim to continue */
1975    if (should_continue_reclaim(zone, nr_reclaimed,
1976                    sc->nr_scanned - nr_scanned, sc))
1977        goto restart;
1978
1979    throttle_vm_writeout(sc->gfp_mask);
1980}
1981
1982/*
1983 * This is the direct reclaim path, for page-allocating processes. We only
1984 * try to reclaim pages from zones which will satisfy the caller's allocation
1985 * request.
1986 *
1987 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1988 * Because:
1989 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1990 * allocation or
1991 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1992 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1993 * zone defense algorithm.
1994 *
1995 * If a zone is deemed to be full of pinned pages then just give it a light
1996 * scan then give up on it.
1997 */
1998static void shrink_zones(int priority, struct zonelist *zonelist,
1999                    struct scan_control *sc)
2000{
2001    struct zoneref *z;
2002    struct zone *zone;
2003    unsigned long nr_soft_reclaimed;
2004    unsigned long nr_soft_scanned;
2005
2006    for_each_zone_zonelist_nodemask(zone, z, zonelist,
2007                    gfp_zone(sc->gfp_mask), sc->nodemask) {
2008        if (!populated_zone(zone))
2009            continue;
2010        /*
2011         * Take care that memory controller reclaiming has only a small
2012         * influence on the global LRU.
2013         */
2014        if (scanning_global_lru(sc)) {
2015            if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2016                continue;
2017            if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2018                continue; /* Let kswapd poll it */
2019            /*
2020             * This steals pages from memory cgroups over softlimit
2021             * and returns the number of reclaimed pages and
2022             * scanned pages. This works for global memory pressure
2023             * and balancing, not for a memcg's limit.
2024             */
2025            nr_soft_scanned = 0;
2026            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2027                        sc->order, sc->gfp_mask,
2028                        &nr_soft_scanned);
2029            sc->nr_reclaimed += nr_soft_reclaimed;
2030            sc->nr_scanned += nr_soft_scanned;
2031            /* need some check here to avoid calling shrink_zone() again */
2032        }
2033
2034        shrink_zone(priority, zone, sc);
2035    }
2036}
2037
2038static bool zone_reclaimable(struct zone *zone)
2039{
2040    return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
2041}
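/*
 * Illustrative example (editorial): with roughly 250,000 reclaimable pages
 * (about 1GB of 4KB pages), the zone is still treated as reclaimable until
 * pages_scanned reaches 1,500,000, i.e. about six full passes over its
 * reclaimable pages without making progress.
 */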
2042
2043/* All zones in zonelist are unreclaimable? */
2044static bool all_unreclaimable(struct zonelist *zonelist,
2045        struct scan_control *sc)
2046{
2047    struct zoneref *z;
2048    struct zone *zone;
2049
2050    for_each_zone_zonelist_nodemask(zone, z, zonelist,
2051            gfp_zone(sc->gfp_mask), sc->nodemask) {
2052        if (!populated_zone(zone))
2053            continue;
2054        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2055            continue;
2056        if (!zone->all_unreclaimable)
2057            return false;
2058    }
2059
2060    return true;
2061}
2062
2063/*
2064 * This is the main entry point to direct page reclaim.
2065 *
2066 * If a full scan of the inactive list fails to free enough memory then we
2067 * are "out of memory" and something needs to be killed.
2068 *
2069 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2070 * high - the zone may be full of dirty or under-writeback pages, which this
2071 * caller can't do much about. We kick the writeback threads and take explicit
2072 * naps in the hope that some of these pages can be written. But if the
2073 * allocating task holds filesystem locks which prevent writeout this might not
2074 * work, and the allocation attempt will fail.
2075 *
2076 * returns: 0, if no pages reclaimed
2077 * else, the number of pages reclaimed
2078 */
2079static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2080                    struct scan_control *sc,
2081                    struct shrink_control *shrink)
2082{
2083    int priority;
2084    unsigned long total_scanned = 0;
2085    struct reclaim_state *reclaim_state = current->reclaim_state;
2086    struct zoneref *z;
2087    struct zone *zone;
2088    unsigned long writeback_threshold;
2089
2090    get_mems_allowed();
2091    delayacct_freepages_start();
2092
2093    if (scanning_global_lru(sc))
2094        count_vm_event(ALLOCSTALL);
2095
2096    for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2097        sc->nr_scanned = 0;
2098        if (!priority)
2099            disable_swap_token(sc->mem_cgroup);
2100        shrink_zones(priority, zonelist, sc);
2101        /*
2102         * Don't shrink slabs when reclaiming memory from
2103         * over limit cgroups
2104         */
2105        if (scanning_global_lru(sc)) {
2106            unsigned long lru_pages = 0;
2107            for_each_zone_zonelist(zone, z, zonelist,
2108                    gfp_zone(sc->gfp_mask)) {
2109                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2110                    continue;
2111
2112                lru_pages += zone_reclaimable_pages(zone);
2113            }
2114
2115            shrink_slab(shrink, sc->nr_scanned, lru_pages);
2116            if (reclaim_state) {
2117                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2118                reclaim_state->reclaimed_slab = 0;
2119            }
2120        }
2121        total_scanned += sc->nr_scanned;
2122        if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2123            goto out;
2124
2125        /*
2126         * Try to write back as many pages as we just scanned. This
2127         * tends to cause slow streaming writers to write data to the
2128         * disk smoothly, at the dirtying rate, which is nice. But
2129         * that's undesirable in laptop mode, where we *want* lumpy
2130         * writeout. So in laptop mode, write out the whole world.
2131         */
2132        writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2133        if (total_scanned > writeback_threshold) {
2134            wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
2135            sc->may_writepage = 1;
2136        }
2137
2138        /* Take a nap, wait for some writeback to complete */
2139        if (!sc->hibernation_mode && sc->nr_scanned &&
2140            priority < DEF_PRIORITY - 2) {
2141            struct zone *preferred_zone;
2142
2143            first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2144                        &cpuset_current_mems_allowed,
2145                        &preferred_zone);
2146            wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2147        }
2148    }
2149
2150out:
2151    delayacct_freepages_end();
2152    put_mems_allowed();
2153
2154    if (sc->nr_reclaimed)
2155        return sc->nr_reclaimed;
2156
2157    /*
2158     * While hibernation is in progress, kswapd is frozen and so cannot
2159     * mark zones all_unreclaimable. Thus we bypass the all_unreclaimable
2160     * check here.
2161     */
2162    if (oom_killer_disabled)
2163        return 0;
2164
2165    /* top priority shrink_zones still had more to do? don't OOM, then */
2166    if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
2167        return 1;
2168
2169    return 0;
2170}
2171
2172unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2173                gfp_t gfp_mask, nodemask_t *nodemask)
2174{
2175    unsigned long nr_reclaimed;
2176    struct scan_control sc = {
2177        .gfp_mask = gfp_mask,
2178        .may_writepage = !laptop_mode,
2179        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2180        .may_unmap = 1,
2181        .may_swap = 1,
2182        .swappiness = vm_swappiness,
2183        .order = order,
2184        .mem_cgroup = NULL,
2185        .nodemask = nodemask,
2186    };
2187    struct shrink_control shrink = {
2188        .gfp_mask = sc.gfp_mask,
2189    };
2190
2191    trace_mm_vmscan_direct_reclaim_begin(order,
2192                sc.may_writepage,
2193                gfp_mask);
2194
2195    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2196
2197    trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2198
2199    return nr_reclaimed;
2200}
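/*
 * Usage sketch (editorial, not part of the original source): a direct-reclaim
 * caller such as the page allocator slow path uses this entry point roughly
 * as follows, where gfp_mask, order and nodemask come from the allocation
 * that failed:
 *
 *	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
 *	unsigned long progress;
 *
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 *	if (!progress)
 *		... fall back to compaction, retries or the OOM killer ...
 */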
2201
2202#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2203
2204unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2205                        gfp_t gfp_mask, bool noswap,
2206                        unsigned int swappiness,
2207                        struct zone *zone,
2208                        unsigned long *nr_scanned)
2209{
2210    struct scan_control sc = {
2211        .nr_scanned = 0,
2212        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2213        .may_writepage = !laptop_mode,
2214        .may_unmap = 1,
2215        .may_swap = !noswap,
2216        .swappiness = swappiness,
2217        .order = 0,
2218        .mem_cgroup = mem,
2219    };
2220
2221    sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2222            (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2223
2224    trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2225                              sc.may_writepage,
2226                              sc.gfp_mask);
2227
2228    /*
2229     * NOTE: Although we can get the priority field, using it
2230     * here is not a good idea, since it limits the pages we can scan.
2231     * If we don't reclaim here, the shrink_zone from balance_pgdat
2232     * will pick up pages from other mem cgroups as well. We hack
2233     * the priority and make it zero.
2234     */
2235    shrink_zone(0, zone, &sc);
2236
2237    trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2238
2239    *nr_scanned = sc.nr_scanned;
2240    return sc.nr_reclaimed;
2241}
2242
2243unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2244                       gfp_t gfp_mask,
2245                       bool noswap,
2246                       unsigned int swappiness)
2247{
2248    struct zonelist *zonelist;
2249    unsigned long nr_reclaimed;
2250    int nid;
2251    struct scan_control sc = {
2252        .may_writepage = !laptop_mode,
2253        .may_unmap = 1,
2254        .may_swap = !noswap,
2255        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2256        .swappiness = swappiness,
2257        .order = 0,
2258        .mem_cgroup = mem_cont,
2259        .nodemask = NULL, /* we don't care about placement */
2260        .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2261                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2262    };
2263    struct shrink_control shrink = {
2264        .gfp_mask = sc.gfp_mask,
2265    };
2266
2267    /*
2268     * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2269     * care which node pages are taken from. So the node where we start the
2270     * scan does not need to be the current node.
2271     */
2272    nid = mem_cgroup_select_victim_node(mem_cont);
2273
2274    zonelist = NODE_DATA(nid)->node_zonelists;
2275
2276    trace_mm_vmscan_memcg_reclaim_begin(0,
2277                        sc.may_writepage,
2278                        sc.gfp_mask);
2279
2280    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2281
2282    trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2283
2284    return nr_reclaimed;
2285}
2286#endif
2287
2288/*
2289 * pgdat_balanced is used when checking if a node is balanced for high-order
2290 * allocations. Only zones that meet watermarks and are in a zone allowed
2291 * by the callers classzone_idx are added to balanced_pages. The total of
2292 * balanced pages must be at least 25% of the zones allowed by classzone_idx
2293 * for the node to be considered balanced. Forcing all zones to be balanced
2294 * for high orders can cause excessive reclaim when there are imbalanced zones.
2295 * The choice of 25% is due to
2296 * o a 16M DMA zone that is balanced will not balance a node on any
2297 * reasonably sized machine
2298 * o On all other machines, the top zone must be at least a reasonable
2299 * percentage of the middle zones. For example, on 32-bit x86, highmem
2300 * would need to be at least 256M for it to balance a whole node.
2301 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2302 * to balance a node on its own. These seemed like reasonable ratios.
2303 */
2304static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
2305                        int classzone_idx)
2306{
2307    unsigned long present_pages = 0;
2308    int i;
2309
2310    for (i = 0; i <= classzone_idx; i++)
2311        present_pages += pgdat->node_zones[i].present_pages;
2312
2313    /* A special case here: if the zone has no pages, we consider it balanced */
2314    return balanced_pages >= (present_pages >> 2);
2315}
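/*
 * Worked example (editorial, numbers are hypothetical): suppose the zones
 * allowed by classzone_idx hold 1,000,000 present pages in total. The node
 * counts as balanced once the zones meeting their watermarks add up to at
 * least 1,000,000 >> 2 = 250,000 pages. Balancing only a 16M DMA zone
 * (4,096 pages) is therefore never enough on such a node, while balancing a
 * large Normal zone alone can be.
 */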
2316
2317/* is kswapd sleeping prematurely? */
2318static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2319                    int classzone_idx)
2320{
2321    int i;
2322    unsigned long balanced = 0;
2323    bool all_zones_ok = true;
2324
2325    /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2326    if (remaining)
2327        return true;
2328
2329    /* Check the watermark levels */
2330    for (i = 0; i <= classzone_idx; i++) {
2331        struct zone *zone = pgdat->node_zones + i;
2332
2333        if (!populated_zone(zone))
2334            continue;
2335
2336        /*
2337         * balance_pgdat() skips over all_unreclaimable zones after
2338         * DEF_PRIORITY. Effectively, it considers them balanced, so
2339         * they must be considered balanced here as well if kswapd
2340         * is to sleep.
2341         */
2342        if (zone->all_unreclaimable) {
2343            balanced += zone->present_pages;
2344            continue;
2345        }
2346
2347        if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
2348                            i, 0))
2349            all_zones_ok = false;
2350        else
2351            balanced += zone->present_pages;
2352    }
2353
2354    /*
2355     * For high-order requests, the balanced zones must contain at least
2356     * 25% of the node's pages for kswapd to sleep. For order-0, all zones
2357     * must be balanced
2358     */
2359    if (order)
2360        return !pgdat_balanced(pgdat, balanced, classzone_idx);
2361    else
2362        return !all_zones_ok;
2363}
2364
2365/*
2366 * For kswapd, balance_pgdat() will work across all this node's zones until
2367 * they are all at high_wmark_pages(zone).
2368 *
2369 * Returns the final order kswapd was reclaiming at
2370 *
2371 * There is special handling here for zones which are full of pinned pages.
2372 * This can happen if the pages are all mlocked, or if they are all used by
2373 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2374 * What we do is to detect the case where all pages in the zone have been
2375 * scanned twice and there has been zero successful reclaim. Mark the zone as
2376 * dead and from now on, only perform a short scan. Basically we're polling
2377 * the zone for when the problem goes away.
2378 *
2379 * kswapd scans the zones in the highmem->normal->dma direction. It skips
2380 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2381 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2382 * lower zones regardless of the number of free pages in the lower zones. This
2383 * interoperates with the page allocator fallback scheme to ensure that aging
2384 * of pages is balanced across the zones.
2385 */
2386static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2387                            int *classzone_idx)
2388{
2389    int all_zones_ok;
2390    unsigned long balanced;
2391    int priority;
2392    int i;
2393    int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2394    unsigned long total_scanned;
2395    struct reclaim_state *reclaim_state = current->reclaim_state;
2396    unsigned long nr_soft_reclaimed;
2397    unsigned long nr_soft_scanned;
2398    struct scan_control sc = {
2399        .gfp_mask = GFP_KERNEL,
2400        .may_unmap = 1,
2401        .may_swap = 1,
2402        /*
2403         * kswapd doesn't want to bail out of reclaim early, because
2404         * we want to put equal scanning pressure on each zone.
2405         */
2406        .nr_to_reclaim = ULONG_MAX,
2407        .swappiness = vm_swappiness,
2408        .order = order,
2409        .mem_cgroup = NULL,
2410    };
2411    struct shrink_control shrink = {
2412        .gfp_mask = sc.gfp_mask,
2413    };
2414loop_again:
2415    total_scanned = 0;
2416    sc.nr_reclaimed = 0;
2417    sc.may_writepage = !laptop_mode;
2418    count_vm_event(PAGEOUTRUN);
2419
2420    for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2421        unsigned long lru_pages = 0;
2422        int has_under_min_watermark_zone = 0;
2423
2424        /* The swap token gets in the way of swapout... */
2425        if (!priority)
2426            disable_swap_token(NULL);
2427
2428        all_zones_ok = 1;
2429        balanced = 0;
2430
2431        /*
2432         * Scan in the highmem->dma direction for the highest
2433         * zone which needs scanning
2434         */
2435        for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2436            struct zone *zone = pgdat->node_zones + i;
2437
2438            if (!populated_zone(zone))
2439                continue;
2440
2441            if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2442                continue;
2443
2444            /*
2445             * Do some background aging of the anon list, to give
2446             * pages a chance to be referenced before reclaiming.
2447             */
2448            if (inactive_anon_is_low(zone, &sc))
2449                shrink_active_list(SWAP_CLUSTER_MAX, zone,
2450                            &sc, priority, 0);
2451
2452            if (!zone_watermark_ok_safe(zone, order,
2453                    high_wmark_pages(zone), 0, 0)) {
2454                end_zone = i;
2455                break;
2456            }
2457        }
2458        if (i < 0)
2459            goto out;
2460
2461        for (i = 0; i <= end_zone; i++) {
2462            struct zone *zone = pgdat->node_zones + i;
2463
2464            lru_pages += zone_reclaimable_pages(zone);
2465        }
2466
2467        /*
2468         * Now scan the zone in the dma->highmem direction, stopping
2469         * at the last zone which needs scanning.
2470         *
2471         * We do this because the page allocator works in the opposite
2472         * direction. This prevents the page allocator from allocating
2473         * pages behind kswapd's direction of progress, which would
2474         * cause too much scanning of the lower zones.
2475         */
2476        for (i = 0; i <= end_zone; i++) {
2477            struct zone *zone = pgdat->node_zones + i;
2478            int nr_slab;
2479            unsigned long balance_gap;
2480
2481            if (!populated_zone(zone))
2482                continue;
2483
2484            if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2485                continue;
2486
2487            sc.nr_scanned = 0;
2488
2489            nr_soft_scanned = 0;
2490            /*
2491             * Call soft limit reclaim before calling shrink_zone.
2492             */
2493            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2494                            order, sc.gfp_mask,
2495                            &nr_soft_scanned);
2496            sc.nr_reclaimed += nr_soft_reclaimed;
2497            total_scanned += nr_soft_scanned;
2498
2499            /*
2500             * We put equal pressure on every zone, unless
2501             * one zone has way too many pages free
2502             * already. The "too many pages" is defined
2503             * as the high wmark plus a "gap" where the
2504             * gap is either the low watermark or 1%
2505             * of the zone, whichever is smaller.
2506             */
2507            balance_gap = min(low_wmark_pages(zone),
2508                (zone->present_pages +
2509                    KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2510                KSWAPD_ZONE_BALANCE_GAP_RATIO);
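            /*
             * Worked example (editorial, numbers are hypothetical): with the
             * balance gap ratio at 100 (i.e. 1% of the zone, as described
             * above), a zone with 1,000,000 present pages and a low
             * watermark of 5,000 pages gets balance_gap =
             * min(5000, 10000) = 5000, so kswapd keeps reclaiming until
             * free pages exceed the high watermark plus 5,000 pages.
             */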
2511            if (!zone_watermark_ok_safe(zone, order,
2512                    high_wmark_pages(zone) + balance_gap,
2513                    end_zone, 0)) {
2514                shrink_zone(priority, zone, &sc);
2515
2516                reclaim_state->reclaimed_slab = 0;
2517                nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2518                sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2519                total_scanned += sc.nr_scanned;
2520
2521                if (nr_slab == 0 && !zone_reclaimable(zone))
2522                    zone->all_unreclaimable = 1;
2523            }
2524
2525            /*
2526             * If we've done a decent amount of scanning and
2527             * the reclaim ratio is low, start doing writepage
2528             * even in laptop mode
2529             */
2530            if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2531                total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2532                sc.may_writepage = 1;
2533
2534            if (zone->all_unreclaimable) {
2535                if (end_zone && end_zone == i)
2536                    end_zone--;
2537                continue;
2538            }
2539
2540            if (!zone_watermark_ok_safe(zone, order,
2541                    high_wmark_pages(zone), end_zone, 0)) {
2542                all_zones_ok = 0;
2543                /*
2544                 * We are still under the min watermark. This
2545                 * means that we have a GFP_ATOMIC allocation
2546                 * failure risk. Hurry up!
2547                 */
2548                if (!zone_watermark_ok_safe(zone, order,
2549                        min_wmark_pages(zone), end_zone, 0))
2550                    has_under_min_watermark_zone = 1;
2551            } else {
2552                /*
2553                 * If a zone reaches its high watermark,
2554                 * consider it to be no longer congested. It's
2555                 * possible there are dirty pages backed by
2556                 * congested BDIs, but as pressure is relieved,
2557                 * speculatively avoid congestion waits.
2558                 */
2559                zone_clear_flag(zone, ZONE_CONGESTED);
2560                if (i <= *classzone_idx)
2561                    balanced += zone->present_pages;
2562            }
2563
2564        }
2565        if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
2566            break; /* kswapd: all done */
2567        /*
2568         * OK, kswapd is getting into trouble. Take a nap, then take
2569         * another pass across the zones.
2570         */
2571        if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2572            if (has_under_min_watermark_zone)
2573                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2574            else
2575                congestion_wait(BLK_RW_ASYNC, HZ/10);
2576        }
2577
2578        /*
2579         * We do this so kswapd doesn't build up large priorities for
2580         * example when it is freeing in parallel with allocators. It
2581         * matches the direct reclaim path behaviour in terms of impact
2582         * on zone->*_priority.
2583         */
2584        if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2585            break;
2586    }
2587out:
2588
2589    /*
2590     * order-0: All zones must meet high watermark for a balanced node
2591     * high-order: Balanced zones must make up at least 25% of the node
2592     * for the node to be balanced
2593     */
2594    if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
2595        cond_resched();
2596
2597        try_to_freeze();
2598
2599        /*
2600         * Fragmentation may mean that the system cannot be
2601         * rebalanced for high-order allocations in all zones.
2602         * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2603         * it means the zones have been fully scanned and are still
2604         * not balanced. For high-order allocations, there is
2605         * little point in trying all over again, as kswapd may
2606         * loop forever.
2607         *
2608         * Instead, recheck all watermarks at order-0 as they
2609         * are the most important. If watermarks are ok, kswapd will go
2610         * back to sleep. High-order users can still perform direct
2611         * reclaim if they wish.
2612         */
2613        if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2614            order = sc.order = 0;
2615
2616        goto loop_again;
2617    }
2618
2619    /*
2620     * If kswapd was reclaiming at a higher order, it has the option of
2621     * sleeping without all zones being balanced. Before it does, it must
2622     * ensure that the watermarks for order-0 on *all* zones are met and
2623     * that the congestion flags are cleared. The congestion flag must
2624     * be cleared as kswapd is the only mechanism that clears the flag
2625     * and it is potentially going to sleep here.
2626     */
2627    if (order) {
2628        for (i = 0; i <= end_zone; i++) {
2629            struct zone *zone = pgdat->node_zones + i;
2630
2631            if (!populated_zone(zone))
2632                continue;
2633
2634            if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2635                continue;
2636
2637            /* Confirm the zone is balanced for order-0 */
2638            if (!zone_watermark_ok(zone, 0,
2639                    high_wmark_pages(zone), 0, 0)) {
2640                order = sc.order = 0;
2641                goto loop_again;
2642            }
2643
2644            /* If balanced, clear the congested flag */
2645            zone_clear_flag(zone, ZONE_CONGESTED);
2646        }
2647    }
2648
2649    /*
2650     * Return the order we were reclaiming at so sleeping_prematurely()
2651     * makes a decision on the order we were last reclaiming at. However,
2652     * if another caller entered the allocator slow path while kswapd
2653     * was awake, order will remain at the higher level
2654     * was awake, order will remain at the higher level.
2655    *classzone_idx = end_zone;
2656    return order;
2657}
2658
2659static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
2660{
2661    long remaining = 0;
2662    DEFINE_WAIT(wait);
2663
2664    if (freezing(current) || kthread_should_stop())
2665        return;
2666
2667    prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2668
2669    /* Try to sleep for a short interval */
2670    if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2671        remaining = schedule_timeout(HZ/10);
2672        finish_wait(&pgdat->kswapd_wait, &wait);
2673        prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2674    }
2675
2676    /*
2677     * After a short sleep, check if it was a premature sleep. If not, then
2678     * go fully to sleep until explicitly woken up.
2679     */
2680    if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2681        trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2682
2683        /*
2684         * vmstat counters are not perfectly accurate and the estimated
2685         * value for counters such as NR_FREE_PAGES can deviate from the
2686         * true value by nr_online_cpus * threshold. To avoid the zone
2687         * watermarks being breached while under pressure, we reduce the
2688         * per-cpu vmstat threshold while kswapd is awake and restore
2689         * them before going back to sleep.
2690         */
2691        set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
2692        schedule();
2693        set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2694    } else {
2695        if (remaining)
2696            count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2697        else
2698            count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2699    }
2700    finish_wait(&pgdat->kswapd_wait, &wait);
2701}
2702
2703/*
2704 * The background pageout daemon, started as a kernel thread
2705 * from the init process.
2706 *
2707 * This basically trickles out pages so that we have _some_
2708 * free memory available even if there is no other activity
2709 * that frees anything up. This is needed for things like routing
2710 * etc, where we otherwise might have all activity going on in
2711 * asynchronous contexts that cannot page things out.
2712 *
2713 * If there are applications that are active memory-allocators
2714 * (most normal use), this basically shouldn't matter.
2715 */
2716static int kswapd(void *p)
2717{
2718    unsigned long order, new_order;
2719    int classzone_idx, new_classzone_idx;
2720    pg_data_t *pgdat = (pg_data_t*)p;
2721    struct task_struct *tsk = current;
2722
2723    struct reclaim_state reclaim_state = {
2724        .reclaimed_slab = 0,
2725    };
2726    const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2727
2728    lockdep_set_current_reclaim_state(GFP_KERNEL);
2729
2730    if (!cpumask_empty(cpumask))
2731        set_cpus_allowed_ptr(tsk, cpumask);
2732    current->reclaim_state = &reclaim_state;
2733
2734    /*
2735     * Tell the memory management that we're a "memory allocator",
2736     * and that if we need more memory we should get access to it
2737     * regardless (see "__alloc_pages()"). "kswapd" should
2738     * never get caught in the normal page freeing logic.
2739     *
2740     * (Kswapd normally doesn't need memory anyway, but sometimes
2741     * you need a small amount of memory in order to be able to
2742     * page out something else, and this flag essentially protects
2743     * us from recursively trying to free more memory as we're
2744     * trying to free the first piece of memory in the first place).
2745     */
2746    tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2747    set_freezable();
2748
2749    order = new_order = 0;
2750    classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
2751    for ( ; ; ) {
2752        int ret;
2753
2754        /*
2755         * If the last balance_pgdat was unsuccessful it's unlikely a
2756         * new request of a similar or harder type will succeed soon,
2757         * so consider going to sleep based on the order we reclaimed at.
2758         */
2759        if (classzone_idx >= new_classzone_idx && order == new_order) {
2760            new_order = pgdat->kswapd_max_order;
2761            new_classzone_idx = pgdat->classzone_idx;
2762            pgdat->kswapd_max_order = 0;
2763            pgdat->classzone_idx = pgdat->nr_zones - 1;
2764        }
2765
2766        if (order < new_order || classzone_idx > new_classzone_idx) {
2767            /*
2768             * Don't sleep if someone wants a larger 'order'
2769             * allocation or has tighter zone constraints.
2770             */
2771            order = new_order;
2772            classzone_idx = new_classzone_idx;
2773        } else {
2774            kswapd_try_to_sleep(pgdat, order, classzone_idx);
2775            order = pgdat->kswapd_max_order;
2776            classzone_idx = pgdat->classzone_idx;
2777            pgdat->kswapd_max_order = 0;
2778            pgdat->classzone_idx = pgdat->nr_zones - 1;
2779        }
2780
2781        ret = try_to_freeze();
2782        if (kthread_should_stop())
2783            break;
2784
2785        /*
2786         * We can speed up thawing tasks if we don't call balance_pgdat
2787         * after returning from the refrigerator.
2788         */
2789        if (!ret) {
2790            trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
2791            order = balance_pgdat(pgdat, order, &classzone_idx);
2792        }
2793    }
2794    return 0;
2795}
2796
2797/*
2798 * A zone is low on free memory, so wake its kswapd task to service it.
2799 */
2800void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
2801{
2802    pg_data_t *pgdat;
2803
2804    if (!populated_zone(zone))
2805        return;
2806
2807    if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2808        return;
2809    pgdat = zone->zone_pgdat;
2810    if (pgdat->kswapd_max_order < order) {
2811        pgdat->kswapd_max_order = order;
2812        pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
2813    }
2814    if (!waitqueue_active(&pgdat->kswapd_wait))
2815        return;
2816    if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
2817        return;
2818
2819    trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
2820    wake_up_interruptible(&pgdat->kswapd_wait);
2821}
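/*
 * Usage sketch (editorial, not part of the original source): the page
 * allocator calls this from its slow path when a zone drops below its low
 * watermark, roughly as follows (variable names are illustrative):
 *
 *	if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				classzone_idx, alloc_flags))
 *		wakeup_kswapd(zone, order, classzone_idx);
 *
 * kswapd then works in the background until the zone is back above its
 * high watermark.
 */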
2822
2823/*
2824 * The reclaimable count should be mostly accurate.
2825 * The less reclaimable pages may be:
2826 * - mlocked pages, which will be moved to the unevictable list when encountered
2827 * - mapped pages, which may require several passes to be reclaimed
2828 * - dirty pages, which are not "instantly" reclaimable
2829 */
2830unsigned long global_reclaimable_pages(void)
2831{
2832    int nr;
2833
2834    nr = global_page_state(NR_ACTIVE_FILE) +
2835         global_page_state(NR_INACTIVE_FILE);
2836
2837    if (nr_swap_pages > 0)
2838        nr += global_page_state(NR_ACTIVE_ANON) +
2839              global_page_state(NR_INACTIVE_ANON);
2840
2841    return nr;
2842}
2843
2844unsigned long zone_reclaimable_pages(struct zone *zone)
2845{
2846    int nr;
2847
2848    nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2849         zone_page_state(zone, NR_INACTIVE_FILE);
2850
2851    if (nr_swap_pages > 0)
2852        nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2853              zone_page_state(zone, NR_INACTIVE_ANON);
2854
2855    return nr;
2856}
2857
2858#ifdef CONFIG_HIBERNATION
2859/*
2860 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
2861 * freed pages.
2862 *
2863 * Rather than trying to age LRUs the aim is to preserve the overall
2864 * LRU order by reclaiming preferentially
2865 * inactive > active > active referenced > active mapped
2866 */
2867unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2868{
2869    struct reclaim_state reclaim_state;
2870    struct scan_control sc = {
2871        .gfp_mask = GFP_HIGHUSER_MOVABLE,
2872        .may_swap = 1,
2873        .may_unmap = 1,
2874        .may_writepage = 1,
2875        .nr_to_reclaim = nr_to_reclaim,
2876        .hibernation_mode = 1,
2877        .swappiness = vm_swappiness,
2878        .order = 0,
2879    };
2880    struct shrink_control shrink = {
2881        .gfp_mask = sc.gfp_mask,
2882    };
2883    struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2884    struct task_struct *p = current;
2885    unsigned long nr_reclaimed;
2886
2887    p->flags |= PF_MEMALLOC;
2888    lockdep_set_current_reclaim_state(sc.gfp_mask);
2889    reclaim_state.reclaimed_slab = 0;
2890    p->reclaim_state = &reclaim_state;
2891
2892    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2893
2894    p->reclaim_state = NULL;
2895    lockdep_clear_current_reclaim_state();
2896    p->flags &= ~PF_MEMALLOC;
2897
2898    return nr_reclaimed;
2899}
2900#endif /* CONFIG_HIBERNATION */
2901
2902/* It's optimal to keep kswapds on the same CPUs as their memory, but
2903   not required for correctness. So if the last cpu in a node goes
2904   away, we get changed to run anywhere: as the first one comes back,
2905   restore their cpu bindings. */
2906static int __devinit cpu_callback(struct notifier_block *nfb,
2907                  unsigned long action, void *hcpu)
2908{
2909    int nid;
2910
2911    if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2912        for_each_node_state(nid, N_HIGH_MEMORY) {
2913            pg_data_t *pgdat = NODE_DATA(nid);
2914            const struct cpumask *mask;
2915
2916            mask = cpumask_of_node(pgdat->node_id);
2917
2918            if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2919                /* One of our CPUs online: restore mask */
2920                set_cpus_allowed_ptr(pgdat->kswapd, mask);
2921        }
2922    }
2923    return NOTIFY_OK;
2924}
2925
2926/*
2927 * This kswapd start function will be called by init and node-hot-add.
2928 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
2929 */
2930int kswapd_run(int nid)
2931{
2932    pg_data_t *pgdat = NODE_DATA(nid);
2933    int ret = 0;
2934
2935    if (pgdat->kswapd)
2936        return 0;
2937
2938    pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2939    if (IS_ERR(pgdat->kswapd)) {
2940        /* failure at boot is fatal */
2941        BUG_ON(system_state == SYSTEM_BOOTING);
2942        printk("Failed to start kswapd on node %d\n", nid);
2943        ret = -1;
2944    }
2945    return ret;
2946}
2947
2948/*
2949 * Called by memory hotplug when all memory in a node is offlined.
2950 */
2951void kswapd_stop(int nid)
2952{
2953    struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2954
2955    if (kswapd)
2956        kthread_stop(kswapd);
2957}
2958
2959static int __init kswapd_init(void)
2960{
2961    int nid;
2962
2963    swap_setup();
2964    for_each_node_state(nid, N_HIGH_MEMORY)
2965         kswapd_run(nid);
2966    hotcpu_notifier(cpu_callback, 0);
2967    return 0;
2968}
2969
2970module_init(kswapd_init)
2971
2972#ifdef CONFIG_NUMA
2973/*
2974 * Zone reclaim mode
2975 *
2976 * If non-zero call zone_reclaim when the number of free pages falls below
2977 * the watermarks.
2978 */
2979int zone_reclaim_mode __read_mostly;
2980
2981#define RECLAIM_OFF 0
2982#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
2983#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
2984#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
2985
2986/*
2987 * Priority for ZONE_RECLAIM. This determines the fraction of pages
2988 * of a zone considered for each zone_reclaim; a priority of 4 scans 1/16th of
2989 * a zone.
2990 */
2991#define ZONE_RECLAIM_PRIORITY 4
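/*
 * Illustrative example (editorial): shrink_zone() scales its scan target by
 * lru_pages >> priority, so the first zone_reclaim pass at priority 4 looks
 * at 1/16th of the zone's LRU pages. If that does not reclaim enough,
 * __zone_reclaim() keeps lowering the priority towards 0, doubling the share
 * scanned on each pass (1/8, 1/4, ...).
 */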
2992
2993/*
2994 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2995 * occur.
2996 */
2997int sysctl_min_unmapped_ratio = 1;
2998
2999/*
3000 * If the number of slab pages in a zone grows beyond this percentage then
3001 * slab reclaim needs to occur.
3002 */
3003int sysctl_min_slab_ratio = 5;
3004
3005static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3006{
3007    unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3008    unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3009        zone_page_state(zone, NR_ACTIVE_FILE);
3010
3011    /*
3012     * It's possible for there to be more file mapped pages than
3013     * accounted for by the pages on the file LRU lists because
3014     * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
3015     */
3016    return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3017}
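/*
 * Worked example (editorial, numbers are hypothetical): with 60,000 pages on
 * the file LRU lists and 20,000 NR_FILE_MAPPED pages, about 40,000 pages are
 * treated as unmapped file cache and are therefore candidates for zone
 * reclaim. If tmpfs pushes NR_FILE_MAPPED above the file LRU total, the
 * helper clamps the result to 0 instead of underflowing.
 */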
3018
3019/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3020static long zone_pagecache_reclaimable(struct zone *zone)
3021{
3022    long nr_pagecache_reclaimable;
3023    long delta = 0;
3024
3025    /*
3026     * If RECLAIM_SWAP is set, then all file pages are considered
3027     * potentially reclaimable. Otherwise, we have to worry about
3028     * pages like swapcache and zone_unmapped_file_pages() provides
3029     * pages like swapcache, and zone_unmapped_file_pages() provides
3030     * a better estimate.
3031    if (zone_reclaim_mode & RECLAIM_SWAP)
3032        nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3033    else
3034        nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3035
3036    /* If we can't clean pages, remove dirty pages from consideration */
3037    if (!(zone_reclaim_mode & RECLAIM_WRITE))
3038        delta += zone_page_state(zone, NR_FILE_DIRTY);
3039
3040    /* Watch for any possible underflows due to delta */
3041    if (unlikely(delta > nr_pagecache_reclaimable))
3042        delta = nr_pagecache_reclaimable;
3043
3044    return nr_pagecache_reclaimable - delta;
3045}
3046
3047/*
3048 * Try to free up some pages from this zone through reclaim.
3049 */
3050static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3051{
3052    /* Minimum pages needed in order to stay on node */
3053    const unsigned long nr_pages = 1 << order;
3054    struct task_struct *p = current;
3055    struct reclaim_state reclaim_state;
3056    int priority;
3057    struct scan_control sc = {
3058        .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3059        .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
3060        .may_swap = 1,
3061        .nr_to_reclaim = max_t(unsigned long, nr_pages,
3062                       SWAP_CLUSTER_MAX),
3063        .gfp_mask = gfp_mask,
3064        .swappiness = vm_swappiness,
3065        .order = order,
3066    };
3067    struct shrink_control shrink = {
3068        .gfp_mask = sc.gfp_mask,
3069    };
3070    unsigned long nr_slab_pages0, nr_slab_pages1;
3071
3072    cond_resched();
3073    /*
3074     * We need to be able to allocate from the reserves for RECLAIM_SWAP
3075     * and we also need to be able to write out pages for RECLAIM_WRITE
3076     * and RECLAIM_SWAP.
3077     */
3078    p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3079    lockdep_set_current_reclaim_state(gfp_mask);
3080    reclaim_state.reclaimed_slab = 0;
3081    p->reclaim_state = &reclaim_state;
3082
3083    if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3084        /*
3085         * Free memory by calling shrink zone with increasing
3086         * priorities until we have enough memory freed.
3087         */
3088        priority = ZONE_RECLAIM_PRIORITY;
3089        do {
3090            shrink_zone(priority, zone, &sc);
3091            priority--;
3092        } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
3093    }
3094
3095    nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3096    if (nr_slab_pages0 > zone->min_slab_pages) {
3097        /*
3098         * shrink_slab() does not currently allow us to determine how
3099         * many pages were freed in this zone. So we take the current
3100         * number of slab pages and shake the slab until it is reduced
3101         * by the same nr_pages that we used for reclaiming unmapped
3102         * pages.
3103         *
3104         * Note that shrink_slab will free memory on all zones and may
3105         * take a long time.
3106         */
3107        for (;;) {
3108            unsigned long lru_pages = zone_reclaimable_pages(zone);
3109
3110            /* No reclaimable slab or very low memory pressure */
3111            if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3112                break;
3113
3114            /* Freed enough memory */
3115            nr_slab_pages1 = zone_page_state(zone,
3116                            NR_SLAB_RECLAIMABLE);
3117            if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3118                break;
3119        }
3120
3121        /*
3122         * Update nr_reclaimed by the number of slab pages we
3123         * reclaimed from this zone.
3124         */
3125        nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3126        if (nr_slab_pages1 < nr_slab_pages0)
3127            sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
3128    }
3129
3130    p->reclaim_state = NULL;
3131    current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3132    lockdep_clear_current_reclaim_state();
3133    return sc.nr_reclaimed >= nr_pages;
3134}
3135
3136int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3137{
3138    int node_id;
3139    int ret;
3140
3141    /*
3142     * Zone reclaim reclaims unmapped file backed pages and
3143     * slab pages if we are over the defined limits.
3144     *
3145     * A small portion of unmapped file backed pages is needed for
3146     * file I/O otherwise pages read by file I/O will be immediately
3147     * thrown out if the zone is overallocated. So we do not reclaim
3148     * if less than a specified percentage of the zone is used by
3149     * unmapped file backed pages.
3150     */
3151    if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3152        zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3153        return ZONE_RECLAIM_FULL;
3154
3155    if (zone->all_unreclaimable)
3156        return ZONE_RECLAIM_FULL;
3157
3158    /*
3159     * Do not scan if the allocation should not be delayed.
3160     */
3161    if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3162        return ZONE_RECLAIM_NOSCAN;
3163
3164    /*
3165     * Only run zone reclaim on the local zone or on zones that do not
3166     * have associated processors. This will favor the local processor
3167     * over remote processors and spread off-node memory allocations
3168     * as widely as possible.
3169     */
3170    node_id = zone_to_nid(zone);
3171    if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3172        return ZONE_RECLAIM_NOSCAN;
3173
3174    if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3175        return ZONE_RECLAIM_NOSCAN;
3176
3177    ret = __zone_reclaim(zone, gfp_mask, order);
3178    zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3179
3180    if (!ret)
3181        count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3182
3183    return ret;
3184}
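
/*
 * A minimal caller-side sketch, not necessarily how mm/page_alloc.c spells
 * it: the page allocator is expected to skip this zone when it gets
 * ZONE_RECLAIM_NOSCAN or ZONE_RECLAIM_FULL back, and to re-check the
 * watermark before allocating on any other result, roughly:
 *
 *	switch (zone_reclaim(zone, gfp_mask, order)) {
 *	case ZONE_RECLAIM_NOSCAN:
 *	case ZONE_RECLAIM_FULL:
 *		continue;
 *	default:
 *		if (zone_watermark_ok(zone, order, mark,
 *				      classzone_idx, alloc_flags))
 *			goto try_this_zone;
 *	}
 *
 * where mark, classzone_idx, alloc_flags, the surrounding zonelist loop and
 * the try_this_zone label all belong to the hypothetical caller.
 */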
3185#endif
3186
3187/*
3188 * page_evictable - test whether a page is evictable
3189 * @page: the page to test
3190 * @vma: the VMA in which the page is or will be mapped, may be NULL
3191 *
3192 * Test whether page is evictable--i.e., should be placed on active/inactive
3193 * lists vs unevictable list. The vma argument is !NULL when called from the
3194 * fault path to determine how to instantiate a new page.
3195 *
3196 * Reasons page might not be evictable:
3197 * (1) page's mapping marked unevictable
3198 * (2) page is part of an mlocked VMA
3199 *
3200 */
3201int page_evictable(struct page *page, struct vm_area_struct *vma)
3202{
3203
3204    if (mapping_unevictable(page_mapping(page)))
3205        return 0;
3206
3207    if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
3208        return 0;
3209
3210    return 1;
3211}
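
/*
 * Usage sketch (the two list helpers below are hypothetical placeholders,
 * not functions defined in this file): a putback path would typically test
 * evictability and pick the LRU list accordingly:
 *
 *	if (page_evictable(page, NULL))
 *		move_page_to_normal_lru(zone, page);
 *	else
 *		move_page_to_unevictable_lru(zone, page);
 *
 * check_move_unevictable_page() below is a concrete instance of this pattern.
 */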
3212
3213/**
3214 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
3215 * @page: page to check evictability and move to appropriate lru list
3216 * @zone: zone page is in
3217 *
3218 * Checks a page for evictability and moves the page to the appropriate
3219 * zone lru list.
3220 *
3221 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
3222 * have PageUnevictable set.
3223 */
3224static void check_move_unevictable_page(struct page *page, struct zone *zone)
3225{
3226    VM_BUG_ON(PageActive(page));
3227
3228retry:
3229    ClearPageUnevictable(page);
3230    if (page_evictable(page, NULL)) {
3231        enum lru_list l = page_lru_base_type(page);
3232
3233        __dec_zone_state(zone, NR_UNEVICTABLE);
3234        list_move(&page->lru, &zone->lru[l].list);
3235        mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
3236        __inc_zone_state(zone, NR_INACTIVE_ANON + l);
3237        __count_vm_event(UNEVICTABLE_PGRESCUED);
3238    } else {
3239        /*
3240         * rotate unevictable list; re-check below in case the page became evictable
3241         */
3242        SetPageUnevictable(page);
3243        list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
3244        mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
3245        if (page_evictable(page, NULL))
3246            goto retry;
3247    }
3248}
3249
3250/**
3251 * scan_mapping_unevictable_pages - scan an address space for evictable pages
3252 * @mapping: struct address_space to scan for evictable pages
3253 *
3254 * Scan all pages in mapping. Check unevictable pages for
3255 * evictability and move them to the appropriate zone lru list.
3256 */
3257void scan_mapping_unevictable_pages(struct address_space *mapping)
3258{
3259    pgoff_t next = 0;
3260    pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
3261             PAGE_CACHE_SHIFT;
3262    struct zone *zone;
3263    struct pagevec pvec;
3264
3265    if (mapping->nrpages == 0)
3266        return;
3267
3268    pagevec_init(&pvec, 0);
3269    while (next < end &&
3270        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
3271        int i;
3272        int pg_scanned = 0;
3273
3274        zone = NULL;
3275
3276        for (i = 0; i < pagevec_count(&pvec); i++) {
3277            struct page *page = pvec.pages[i];
3278            pgoff_t page_index = page->index;
3279            struct zone *pagezone = page_zone(page);
3280
3281            pg_scanned++;
3282            if (page_index > next)
3283                next = page_index;
3284            next++;
3285
3286            if (pagezone != zone) {
3287                if (zone)
3288                    spin_unlock_irq(&zone->lru_lock);
3289                zone = pagezone;
3290                spin_lock_irq(&zone->lru_lock);
3291            }
3292
3293            if (PageLRU(page) && PageUnevictable(page))
3294                check_move_unevictable_page(page, zone);
3295        }
3296        if (zone)
3297            spin_unlock_irq(&zone->lru_lock);
3298        pagevec_release(&pvec);
3299
3300        count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
3301    }
3302
3303}
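
/*
 * Usage sketch (illustrative; the unlock path shown is assumed, not quoted):
 * a caller that has just made a whole mapping evictable again, e.g. SysV
 * shared memory being SHM_UNLOCKed, would typically do:
 *
 *	mapping_clear_unevictable(mapping);
 *	scan_mapping_unevictable_pages(mapping);
 *
 * so that pages parked on the unevictable lists while the mapping was
 * locked are returned to the normal LRUs and become reclaimable again.
 */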
3304
3305/**
3306 * scan_zone_unevictable_pages - check unevictable list for evictable pages
3307 * @zone: zone whose unevictable list is to be scanned
3308 *
3309 * Scan @zone's unevictable LRU lists to check for pages that have become
3310 * evictable. Move those that have to @zone's inactive list where they
3311 * become candidates for reclaim, unless shrink_inactive_list() decides
3312 * to reactivate them. Pages that are still unevictable are rotated
3313 * back onto @zone's unevictable list.
3314 */
3315#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
3316static void scan_zone_unevictable_pages(struct zone *zone)
3317{
3318    struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
3319    unsigned long scan;
3320    unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
3321
3322    while (nr_to_scan > 0) {
3323        unsigned long batch_size = min(nr_to_scan,
3324                        SCAN_UNEVICTABLE_BATCH_SIZE);
3325
3326        spin_lock_irq(&zone->lru_lock);
3327        for (scan = 0; scan < batch_size; scan++) {
3328            struct page *page = lru_to_page(l_unevictable);
3329
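            /*
             * zone->lru_lock is held with interrupts disabled here, so
             * only trylock the page; sleeping on the page lock is not
             * an option at this point.
             */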
3330            if (!trylock_page(page))
3331                continue;
3332
3333            prefetchw_prev_lru_page(page, l_unevictable, flags);
3334
3335            if (likely(PageLRU(page) && PageUnevictable(page)))
3336                check_move_unevictable_page(page, zone);
3337
3338            unlock_page(page);
3339        }
3340        spin_unlock_irq(&zone->lru_lock);
3341
3342        nr_to_scan -= batch_size;
3343    }
3344}
3345
3346
3347/**
3348 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
3349 *
3350 * A really big hammer: scan all zones' unevictable LRU lists to check for
3351 * pages that have become evictable. Move those back to the zones'
3352 * inactive list where they become candidates for reclaim.
3353 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
3354 * and we add swap to the system. As such, it runs in the context of a task
3355 * that has possibly/probably made some previously unevictable pages
3356 * evictable.
3357 */
3358static void scan_all_zones_unevictable_pages(void)
3359{
3360    struct zone *zone;
3361
3362    for_each_zone(zone) {
3363        scan_zone_unevictable_pages(zone);
3364    }
3365}
3366
3367/*
3368 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3369 * all nodes' unevictable lists for evictable pages
3370 */
3371unsigned long scan_unevictable_pages;
3372
3373int scan_unevictable_handler(struct ctl_table *table, int write,
3374               void __user *buffer,
3375               size_t *length, loff_t *ppos)
3376{
3377    proc_doulongvec_minmax(table, write, buffer, length, ppos);
3378
3379    if (write && *(unsigned long *)table->data)
3380        scan_all_zones_unevictable_pages();
3381
3382    scan_unevictable_pages = 0;
3383    return 0;
3384}
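
/*
 * Illustrative trigger (assumes the standard sysctl wiring of this handler
 * under /proc/sys/vm): any non-zero write starts a full rescan, and the
 * value then reads back as zero, e.g.:
 *
 *	echo 1 > /proc/sys/vm/scan_unevictable_pages
 */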
3385
3386#ifdef CONFIG_NUMA
3387/*
3388 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3389 * a specified node's per zone unevictable lists for evictable pages.
3390 */
3391
3392static ssize_t read_scan_unevictable_node(struct sys_device *dev,
3393                      struct sysdev_attribute *attr,
3394                      char *buf)
3395{
3396    return sprintf(buf, "0\n"); /* always zero; should fit... */
3397}
3398
3399static ssize_t write_scan_unevictable_node(struct sys_device *dev,
3400                       struct sysdev_attribute *attr,
3401                    const char *buf, size_t count)
3402{
3403    struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
3404    struct zone *zone;
3405    unsigned long res;
3406    unsigned long req = strict_strtoul(buf, 10, &res); /* 0 on success */
3407
3408    if (req || !res)
3409        return 1; /* parse error or zero is a no-op */
3410
3411    for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
3412        if (!populated_zone(zone))
3413            continue;
3414        scan_zone_unevictable_pages(zone);
3415    }
3416    return 1;
3417}
3418
3419
3420static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3421            read_scan_unevictable_node,
3422            write_scan_unevictable_node);
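
/*
 * Illustrative trigger for the per-node variant (assuming the node sysdevs
 * are registered under /sys/devices/system/node/ as usual): writing a
 * non-zero value rescans every populated zone of that node, and reads
 * always return "0":
 *
 *	echo 1 > /sys/devices/system/node/node0/scan_unevictable_pages
 */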
3423
3424int scan_unevictable_register_node(struct node *node)
3425{
3426    return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
3427}
3428
3429void scan_unevictable_unregister_node(struct node *node)
3430{
3431    sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
3432}
3433#endif
3434
