Root/mm/vmscan.c

1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmstat.h>
23#include <linux/file.h>
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/buffer_head.h> /* for try_to_release_page(),
27                    buffer_heads_over_limit */
28#include <linux/mm_inline.h>
29#include <linux/backing-dev.h>
30#include <linux/rmap.h>
31#include <linux/topology.h>
32#include <linux/cpu.h>
33#include <linux/cpuset.h>
34#include <linux/compaction.h>
35#include <linux/notifier.h>
36#include <linux/rwsem.h>
37#include <linux/delay.h>
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40#include <linux/memcontrol.h>
41#include <linux/delayacct.h>
42#include <linux/sysctl.h>
43#include <linux/oom.h>
44#include <linux/prefetch.h>
45
46#include <asm/tlbflush.h>
47#include <asm/div64.h>
48
49#include <linux/swapops.h>
50
51#include "internal.h"
52
53#define CREATE_TRACE_POINTS
54#include <trace/events/vmscan.h>
55
56struct scan_control {
57    /* Incremented by the number of inactive pages that were scanned */
58    unsigned long nr_scanned;
59
60    /* Number of pages freed so far during a call to shrink_zones() */
61    unsigned long nr_reclaimed;
62
63    /* How many pages shrink_list() should reclaim */
64    unsigned long nr_to_reclaim;
65
66    unsigned long hibernation_mode;
67
68    /* This context's GFP mask */
69    gfp_t gfp_mask;
70
71    int may_writepage;
72
73    /* Can mapped pages be reclaimed? */
74    int may_unmap;
75
76    /* Can pages be swapped as part of reclaim? */
77    int may_swap;
78
79    int order;
80
81    /* Scan (total_size >> priority) pages at once */
82    int priority;
83
84    /*
85     * The memory cgroup that hit its limit and as a result is the
86     * primary target of this reclaim invocation.
87     */
88    struct mem_cgroup *target_mem_cgroup;
89
90    /*
91     * Nodemask of nodes allowed by the caller. If NULL, all nodes
92     * are scanned.
93     */
94    nodemask_t *nodemask;
95};
96
97#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
98
99#ifdef ARCH_HAS_PREFETCH
100#define prefetch_prev_lru_page(_page, _base, _field) \
101    do { \
102        if ((_page)->lru.prev != _base) { \
103            struct page *prev; \
104                                    \
105            prev = lru_to_page(&(_page->lru)); \
106            prefetch(&prev->_field); \
107        } \
108    } while (0)
109#else
110#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
111#endif
112
113#ifdef ARCH_HAS_PREFETCHW
114#define prefetchw_prev_lru_page(_page, _base, _field) \
115    do { \
116        if ((_page)->lru.prev != _base) { \
117            struct page *prev; \
118                                    \
119            prev = lru_to_page(&(_page->lru)); \
120            prefetchw(&prev->_field); \
121        } \
122    } while (0)
123#else
124#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
125#endif
126
127/*
128 * From 0 .. 100. Higher means more swappy.
129 */
130int vm_swappiness = 60;
131long vm_total_pages; /* The total number of pages which the VM controls */
132
133static LIST_HEAD(shrinker_list);
134static DECLARE_RWSEM(shrinker_rwsem);
135
136#ifdef CONFIG_MEMCG
137static bool global_reclaim(struct scan_control *sc)
138{
139    return !sc->target_mem_cgroup;
140}
141#else
142static bool global_reclaim(struct scan_control *sc)
143{
144    return true;
145}
146#endif
147
148static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
149{
150    if (!mem_cgroup_disabled())
151        return mem_cgroup_get_lru_size(lruvec, lru);
152
153    return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
154}
155
156/*
157 * Add a shrinker callback to be called from the vm
158 */
159void register_shrinker(struct shrinker *shrinker)
160{
161    atomic_long_set(&shrinker->nr_in_batch, 0);
162    down_write(&shrinker_rwsem);
163    list_add_tail(&shrinker->list, &shrinker_list);
164    up_write(&shrinker_rwsem);
165}
166EXPORT_SYMBOL(register_shrinker);
167
168/*
169 * Remove one
170 */
171void unregister_shrinker(struct shrinker *shrinker)
172{
173    down_write(&shrinker_rwsem);
174    list_del(&shrinker->list);
175    up_write(&shrinker_rwsem);
176}
177EXPORT_SYMBOL(unregister_shrinker);
178
179static inline int do_shrinker_shrink(struct shrinker *shrinker,
180                     struct shrink_control *sc,
181                     unsigned long nr_to_scan)
182{
183    sc->nr_to_scan = nr_to_scan;
184    return (*shrinker->shrink)(shrinker, sc);
185}
186
187#define SHRINK_BATCH 128
188/*
189 * Call the shrink functions to age shrinkable caches
190 *
191 * Here we assume it costs one seek to replace a lru page and that it also
192 * takes a seek to recreate a cache object. With this in mind we age equal
193 * percentages of the lru and ageable caches. This should balance the seeks
194 * generated by these structures.
195 *
196 * If the vm encountered mapped pages on the LRU it increase the pressure on
197 * slab to avoid swapping.
198 *
199 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
200 *
201 * `lru_pages' represents the number of on-LRU pages in all the zones which
202 * are eligible for the caller's allocation attempt. It is used for balancing
203 * slab reclaim versus page reclaim.
204 *
205 * Returns the number of slab objects which we shrunk.
206 */
207unsigned long shrink_slab(struct shrink_control *shrink,
208              unsigned long nr_pages_scanned,
209              unsigned long lru_pages)
210{
211    struct shrinker *shrinker;
212    unsigned long ret = 0;
213
214    if (nr_pages_scanned == 0)
215        nr_pages_scanned = SWAP_CLUSTER_MAX;
216
217    if (!down_read_trylock(&shrinker_rwsem)) {
218        /* Assume we'll be able to shrink next time */
219        ret = 1;
220        goto out;
221    }
222
223    list_for_each_entry(shrinker, &shrinker_list, list) {
224        unsigned long long delta;
225        long total_scan;
226        long max_pass;
227        int shrink_ret = 0;
228        long nr;
229        long new_nr;
230        long batch_size = shrinker->batch ? shrinker->batch
231                          : SHRINK_BATCH;
232
233        max_pass = do_shrinker_shrink(shrinker, shrink, 0);
234        if (max_pass <= 0)
235            continue;
236
237        /*
238         * copy the current shrinker scan count into a local variable
239         * and zero it so that other concurrent shrinker invocations
240         * don't also do this scanning work.
241         */
242        nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
243
244        total_scan = nr;
245        delta = (4 * nr_pages_scanned) / shrinker->seeks;
246        delta *= max_pass;
247        do_div(delta, lru_pages + 1);
248        total_scan += delta;
249        if (total_scan < 0) {
250            printk(KERN_ERR "shrink_slab: %pF negative objects to "
251                   "delete nr=%ld\n",
252                   shrinker->shrink, total_scan);
253            total_scan = max_pass;
254        }
255
256        /*
257         * We need to avoid excessive windup on filesystem shrinkers
258         * due to large numbers of GFP_NOFS allocations causing the
259         * shrinkers to return -1 all the time. This results in a large
260         * nr being built up so when a shrink that can do some work
261         * comes along it empties the entire cache due to nr >>>
262         * max_pass. This is bad for sustaining a working set in
263         * memory.
264         *
265         * Hence only allow the shrinker to scan the entire cache when
266         * a large delta change is calculated directly.
267         */
268        if (delta < max_pass / 4)
269            total_scan = min(total_scan, max_pass / 2);
270
271        /*
272         * Avoid risking looping forever due to too large nr value:
273         * never try to free more than twice the estimate number of
274         * freeable entries.
275         */
276        if (total_scan > max_pass * 2)
277            total_scan = max_pass * 2;
278
279        trace_mm_shrink_slab_start(shrinker, shrink, nr,
280                    nr_pages_scanned, lru_pages,
281                    max_pass, delta, total_scan);
282
283        while (total_scan >= batch_size) {
284            int nr_before;
285
286            nr_before = do_shrinker_shrink(shrinker, shrink, 0);
287            shrink_ret = do_shrinker_shrink(shrinker, shrink,
288                            batch_size);
289            if (shrink_ret == -1)
290                break;
291            if (shrink_ret < nr_before)
292                ret += nr_before - shrink_ret;
293            count_vm_events(SLABS_SCANNED, batch_size);
294            total_scan -= batch_size;
295
296            cond_resched();
297        }
298
299        /*
300         * move the unused scan count back into the shrinker in a
301         * manner that handles concurrent updates. If we exhausted the
302         * scan, there is no need to do an update.
303         */
304        if (total_scan > 0)
305            new_nr = atomic_long_add_return(total_scan,
306                    &shrinker->nr_in_batch);
307        else
308            new_nr = atomic_long_read(&shrinker->nr_in_batch);
309
310        trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
311    }
312    up_read(&shrinker_rwsem);
313out:
314    cond_resched();
315    return ret;
316}
317
318static inline int is_page_cache_freeable(struct page *page)
319{
320    /*
321     * A freeable page cache page is referenced only by the caller
322     * that isolated the page, the page cache radix tree and
323     * optional buffer heads at page->private.
324     */
325    return page_count(page) - page_has_private(page) == 2;
326}
327
328static int may_write_to_queue(struct backing_dev_info *bdi,
329                  struct scan_control *sc)
330{
331    if (current->flags & PF_SWAPWRITE)
332        return 1;
333    if (!bdi_write_congested(bdi))
334        return 1;
335    if (bdi == current->backing_dev_info)
336        return 1;
337    return 0;
338}
339
340/*
341 * We detected a synchronous write error writing a page out. Probably
342 * -ENOSPC. We need to propagate that into the address_space for a subsequent
343 * fsync(), msync() or close().
344 *
345 * The tricky part is that after writepage we cannot touch the mapping: nothing
346 * prevents it from being freed up. But we have a ref on the page and once
347 * that page is locked, the mapping is pinned.
348 *
349 * We're allowed to run sleeping lock_page() here because we know the caller has
350 * __GFP_FS.
351 */
352static void handle_write_error(struct address_space *mapping,
353                struct page *page, int error)
354{
355    lock_page(page);
356    if (page_mapping(page) == mapping)
357        mapping_set_error(mapping, error);
358    unlock_page(page);
359}
360
361/* possible outcome of pageout() */
362typedef enum {
363    /* failed to write page out, page is locked */
364    PAGE_KEEP,
365    /* move page to the active list, page is locked */
366    PAGE_ACTIVATE,
367    /* page has been sent to the disk successfully, page is unlocked */
368    PAGE_SUCCESS,
369    /* page is clean and locked */
370    PAGE_CLEAN,
371} pageout_t;
372
373/*
374 * pageout is called by shrink_page_list() for each dirty page.
375 * Calls ->writepage().
376 */
377static pageout_t pageout(struct page *page, struct address_space *mapping,
378             struct scan_control *sc)
379{
380    /*
381     * If the page is dirty, only perform writeback if that write
382     * will be non-blocking. To prevent this allocation from being
383     * stalled by pagecache activity. But note that there may be
384     * stalls if we need to run get_block(). We could test
385     * PagePrivate for that.
386     *
387     * If this process is currently in __generic_file_aio_write() against
388     * this page's queue, we can perform writeback even if that
389     * will block.
390     *
391     * If the page is swapcache, write it back even if that would
392     * block, for some throttling. This happens by accident, because
393     * swap_backing_dev_info is bust: it doesn't reflect the
394     * congestion state of the swapdevs. Easy to fix, if needed.
395     */
396    if (!is_page_cache_freeable(page))
397        return PAGE_KEEP;
398    if (!mapping) {
399        /*
400         * Some data journaling orphaned pages can have
401         * page->mapping == NULL while being dirty with clean buffers.
402         */
403        if (page_has_private(page)) {
404            if (try_to_free_buffers(page)) {
405                ClearPageDirty(page);
406                printk("%s: orphaned page\n", __func__);
407                return PAGE_CLEAN;
408            }
409        }
410        return PAGE_KEEP;
411    }
412    if (mapping->a_ops->writepage == NULL)
413        return PAGE_ACTIVATE;
414    if (!may_write_to_queue(mapping->backing_dev_info, sc))
415        return PAGE_KEEP;
416
417    if (clear_page_dirty_for_io(page)) {
418        int res;
419        struct writeback_control wbc = {
420            .sync_mode = WB_SYNC_NONE,
421            .nr_to_write = SWAP_CLUSTER_MAX,
422            .range_start = 0,
423            .range_end = LLONG_MAX,
424            .for_reclaim = 1,
425        };
426
427        SetPageReclaim(page);
428        res = mapping->a_ops->writepage(page, &wbc);
429        if (res < 0)
430            handle_write_error(mapping, page, res);
431        if (res == AOP_WRITEPAGE_ACTIVATE) {
432            ClearPageReclaim(page);
433            return PAGE_ACTIVATE;
434        }
435
436        if (!PageWriteback(page)) {
437            /* synchronous write or broken a_ops? */
438            ClearPageReclaim(page);
439        }
440        trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
441        inc_zone_page_state(page, NR_VMSCAN_WRITE);
442        return PAGE_SUCCESS;
443    }
444
445    return PAGE_CLEAN;
446}
447
448/*
449 * Same as remove_mapping, but if the page is removed from the mapping, it
450 * gets returned with a refcount of 0.
451 */
452static int __remove_mapping(struct address_space *mapping, struct page *page)
453{
454    BUG_ON(!PageLocked(page));
455    BUG_ON(mapping != page_mapping(page));
456
457    spin_lock_irq(&mapping->tree_lock);
458    /*
459     * The non racy check for a busy page.
460     *
461     * Must be careful with the order of the tests. When someone has
462     * a ref to the page, it may be possible that they dirty it then
463     * drop the reference. So if PageDirty is tested before page_count
464     * here, then the following race may occur:
465     *
466     * get_user_pages(&page);
467     * [user mapping goes away]
468     * write_to(page);
469     * !PageDirty(page) [good]
470     * SetPageDirty(page);
471     * put_page(page);
472     * !page_count(page) [good, discard it]
473     *
474     * [oops, our write_to data is lost]
475     *
476     * Reversing the order of the tests ensures such a situation cannot
477     * escape unnoticed. The smp_rmb is needed to ensure the page->flags
478     * load is not satisfied before that of page->_count.
479     *
480     * Note that if SetPageDirty is always performed via set_page_dirty,
481     * and thus under tree_lock, then this ordering is not required.
482     */
483    if (!page_freeze_refs(page, 2))
484        goto cannot_free;
485    /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
486    if (unlikely(PageDirty(page))) {
487        page_unfreeze_refs(page, 2);
488        goto cannot_free;
489    }
490
491    if (PageSwapCache(page)) {
492        swp_entry_t swap = { .val = page_private(page) };
493        __delete_from_swap_cache(page);
494        spin_unlock_irq(&mapping->tree_lock);
495        swapcache_free(swap, page);
496    } else {
497        void (*freepage)(struct page *);
498
499        freepage = mapping->a_ops->freepage;
500
501        __delete_from_page_cache(page);
502        spin_unlock_irq(&mapping->tree_lock);
503        mem_cgroup_uncharge_cache_page(page);
504
505        if (freepage != NULL)
506            freepage(page);
507    }
508
509    return 1;
510
511cannot_free:
512    spin_unlock_irq(&mapping->tree_lock);
513    return 0;
514}
515
516/*
517 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
518 * someone else has a ref on the page, abort and return 0. If it was
519 * successfully detached, return 1. Assumes the caller has a single ref on
520 * this page.
521 */
522int remove_mapping(struct address_space *mapping, struct page *page)
523{
524    if (__remove_mapping(mapping, page)) {
525        /*
526         * Unfreezing the refcount with 1 rather than 2 effectively
527         * drops the pagecache ref for us without requiring another
528         * atomic operation.
529         */
530        page_unfreeze_refs(page, 1);
531        return 1;
532    }
533    return 0;
534}
535
536/**
537 * putback_lru_page - put previously isolated page onto appropriate LRU list
538 * @page: page to be put back to appropriate lru list
539 *
540 * Add previously isolated @page to appropriate LRU list.
541 * Page may still be unevictable for other reasons.
542 *
543 * lru_lock must not be held, interrupts must be enabled.
544 */
545void putback_lru_page(struct page *page)
546{
547    int lru;
548    int active = !!TestClearPageActive(page);
549    int was_unevictable = PageUnevictable(page);
550
551    VM_BUG_ON(PageLRU(page));
552
553redo:
554    ClearPageUnevictable(page);
555
556    if (page_evictable(page, NULL)) {
557        /*
558         * For evictable pages, we can use the cache.
559         * In event of a race, worst case is we end up with an
560         * unevictable page on [in]active list.
561         * We know how to handle that.
562         */
563        lru = active + page_lru_base_type(page);
564        lru_cache_add_lru(page, lru);
565    } else {
566        /*
567         * Put unevictable pages directly on zone's unevictable
568         * list.
569         */
570        lru = LRU_UNEVICTABLE;
571        add_page_to_unevictable_list(page);
572        /*
573         * When racing with an mlock or AS_UNEVICTABLE clearing
574         * (page is unlocked) make sure that if the other thread
575         * does not observe our setting of PG_lru and fails
576         * isolation/check_move_unevictable_pages,
577         * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
578         * the page back to the evictable list.
579         *
580         * The other side is TestClearPageMlocked() or shmem_lock().
581         */
582        smp_mb();
583    }
584
585    /*
586     * page's status can change while we move it among lru. If an evictable
587     * page is on unevictable list, it never be freed. To avoid that,
588     * check after we added it to the list, again.
589     */
590    if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
591        if (!isolate_lru_page(page)) {
592            put_page(page);
593            goto redo;
594        }
595        /* This means someone else dropped this page from LRU
596         * So, it will be freed or putback to LRU again. There is
597         * nothing to do here.
598         */
599    }
600
601    if (was_unevictable && lru != LRU_UNEVICTABLE)
602        count_vm_event(UNEVICTABLE_PGRESCUED);
603    else if (!was_unevictable && lru == LRU_UNEVICTABLE)
604        count_vm_event(UNEVICTABLE_PGCULLED);
605
606    put_page(page); /* drop ref from isolate */
607}
608
609enum page_references {
610    PAGEREF_RECLAIM,
611    PAGEREF_RECLAIM_CLEAN,
612    PAGEREF_KEEP,
613    PAGEREF_ACTIVATE,
614};
615
616static enum page_references page_check_references(struct page *page,
617                          struct scan_control *sc)
618{
619    int referenced_ptes, referenced_page;
620    unsigned long vm_flags;
621
622    referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
623                      &vm_flags);
624    referenced_page = TestClearPageReferenced(page);
625
626    /*
627     * Mlock lost the isolation race with us. Let try_to_unmap()
628     * move the page to the unevictable list.
629     */
630    if (vm_flags & VM_LOCKED)
631        return PAGEREF_RECLAIM;
632
633    if (referenced_ptes) {
634        if (PageSwapBacked(page))
635            return PAGEREF_ACTIVATE;
636        /*
637         * All mapped pages start out with page table
638         * references from the instantiating fault, so we need
639         * to look twice if a mapped file page is used more
640         * than once.
641         *
642         * Mark it and spare it for another trip around the
643         * inactive list. Another page table reference will
644         * lead to its activation.
645         *
646         * Note: the mark is set for activated pages as well
647         * so that recently deactivated but used pages are
648         * quickly recovered.
649         */
650        SetPageReferenced(page);
651
652        if (referenced_page || referenced_ptes > 1)
653            return PAGEREF_ACTIVATE;
654
655        /*
656         * Activate file-backed executable pages after first usage.
657         */
658        if (vm_flags & VM_EXEC)
659            return PAGEREF_ACTIVATE;
660
661        return PAGEREF_KEEP;
662    }
663
664    /* Reclaim if clean, defer dirty pages to writeback */
665    if (referenced_page && !PageSwapBacked(page))
666        return PAGEREF_RECLAIM_CLEAN;
667
668    return PAGEREF_RECLAIM;
669}
670
671/*
672 * shrink_page_list() returns the number of reclaimed pages
673 */
674static unsigned long shrink_page_list(struct list_head *page_list,
675                      struct zone *zone,
676                      struct scan_control *sc,
677                      unsigned long *ret_nr_dirty,
678                      unsigned long *ret_nr_writeback)
679{
680    LIST_HEAD(ret_pages);
681    LIST_HEAD(free_pages);
682    int pgactivate = 0;
683    unsigned long nr_dirty = 0;
684    unsigned long nr_congested = 0;
685    unsigned long nr_reclaimed = 0;
686    unsigned long nr_writeback = 0;
687
688    cond_resched();
689
690    mem_cgroup_uncharge_start();
691    while (!list_empty(page_list)) {
692        enum page_references references;
693        struct address_space *mapping;
694        struct page *page;
695        int may_enter_fs;
696
697        cond_resched();
698
699        page = lru_to_page(page_list);
700        list_del(&page->lru);
701
702        if (!trylock_page(page))
703            goto keep;
704
705        VM_BUG_ON(PageActive(page));
706        VM_BUG_ON(page_zone(page) != zone);
707
708        sc->nr_scanned++;
709
710        if (unlikely(!page_evictable(page, NULL)))
711            goto cull_mlocked;
712
713        if (!sc->may_unmap && page_mapped(page))
714            goto keep_locked;
715
716        /* Double the slab pressure for mapped and swapcache pages */
717        if (page_mapped(page) || PageSwapCache(page))
718            sc->nr_scanned++;
719
720        may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
721            (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
722
723        if (PageWriteback(page)) {
724            /*
725             * memcg doesn't have any dirty pages throttling so we
726             * could easily OOM just because too many pages are in
727             * writeback and there is nothing else to reclaim.
728             *
729             * Check __GFP_IO, certainly because a loop driver
730             * thread might enter reclaim, and deadlock if it waits
731             * on a page for which it is needed to do the write
732             * (loop masks off __GFP_IO|__GFP_FS for this reason);
733             * but more thought would probably show more reasons.
734             *
735             * Don't require __GFP_FS, since we're not going into
736             * the FS, just waiting on its writeback completion.
737             * Worryingly, ext4 gfs2 and xfs allocate pages with
738             * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
739             * testing may_enter_fs here is liable to OOM on them.
740             */
741            if (global_reclaim(sc) ||
742                !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
743                /*
744                 * This is slightly racy - end_page_writeback()
745                 * might have just cleared PageReclaim, then
746                 * setting PageReclaim here end up interpreted
747                 * as PageReadahead - but that does not matter
748                 * enough to care. What we do want is for this
749                 * page to have PageReclaim set next time memcg
750                 * reclaim reaches the tests above, so it will
751                 * then wait_on_page_writeback() to avoid OOM;
752                 * and it's also appropriate in global reclaim.
753                 */
754                SetPageReclaim(page);
755                nr_writeback++;
756                goto keep_locked;
757            }
758            wait_on_page_writeback(page);
759        }
760
761        references = page_check_references(page, sc);
762        switch (references) {
763        case PAGEREF_ACTIVATE:
764            goto activate_locked;
765        case PAGEREF_KEEP:
766            goto keep_locked;
767        case PAGEREF_RECLAIM:
768        case PAGEREF_RECLAIM_CLEAN:
769            ; /* try to reclaim the page below */
770        }
771
772        /*
773         * Anonymous process memory has backing store?
774         * Try to allocate it some swap space here.
775         */
776        if (PageAnon(page) && !PageSwapCache(page)) {
777            if (!(sc->gfp_mask & __GFP_IO))
778                goto keep_locked;
779            if (!add_to_swap(page))
780                goto activate_locked;
781            may_enter_fs = 1;
782        }
783
784        mapping = page_mapping(page);
785
786        /*
787         * The page is mapped into the page tables of one or more
788         * processes. Try to unmap it here.
789         */
790        if (page_mapped(page) && mapping) {
791            switch (try_to_unmap(page, TTU_UNMAP)) {
792            case SWAP_FAIL:
793                goto activate_locked;
794            case SWAP_AGAIN:
795                goto keep_locked;
796            case SWAP_MLOCK:
797                goto cull_mlocked;
798            case SWAP_SUCCESS:
799                ; /* try to free the page below */
800            }
801        }
802
803        if (PageDirty(page)) {
804            nr_dirty++;
805
806            /*
807             * Only kswapd can writeback filesystem pages to
808             * avoid risk of stack overflow but do not writeback
809             * unless under significant pressure.
810             */
811            if (page_is_file_cache(page) &&
812                    (!current_is_kswapd() ||
813                     sc->priority >= DEF_PRIORITY - 2)) {
814                /*
815                 * Immediately reclaim when written back.
816                 * Similar in principal to deactivate_page()
817                 * except we already have the page isolated
818                 * and know it's dirty
819                 */
820                inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
821                SetPageReclaim(page);
822
823                goto keep_locked;
824            }
825
826            if (references == PAGEREF_RECLAIM_CLEAN)
827                goto keep_locked;
828            if (!may_enter_fs)
829                goto keep_locked;
830            if (!sc->may_writepage)
831                goto keep_locked;
832
833            /* Page is dirty, try to write it out here */
834            switch (pageout(page, mapping, sc)) {
835            case PAGE_KEEP:
836                nr_congested++;
837                goto keep_locked;
838            case PAGE_ACTIVATE:
839                goto activate_locked;
840            case PAGE_SUCCESS:
841                if (PageWriteback(page))
842                    goto keep;
843                if (PageDirty(page))
844                    goto keep;
845
846                /*
847                 * A synchronous write - probably a ramdisk. Go
848                 * ahead and try to reclaim the page.
849                 */
850                if (!trylock_page(page))
851                    goto keep;
852                if (PageDirty(page) || PageWriteback(page))
853                    goto keep_locked;
854                mapping = page_mapping(page);
855            case PAGE_CLEAN:
856                ; /* try to free the page below */
857            }
858        }
859
860        /*
861         * If the page has buffers, try to free the buffer mappings
862         * associated with this page. If we succeed we try to free
863         * the page as well.
864         *
865         * We do this even if the page is PageDirty().
866         * try_to_release_page() does not perform I/O, but it is
867         * possible for a page to have PageDirty set, but it is actually
868         * clean (all its buffers are clean). This happens if the
869         * buffers were written out directly, with submit_bh(). ext3
870         * will do this, as well as the blockdev mapping.
871         * try_to_release_page() will discover that cleanness and will
872         * drop the buffers and mark the page clean - it can be freed.
873         *
874         * Rarely, pages can have buffers and no ->mapping. These are
875         * the pages which were not successfully invalidated in
876         * truncate_complete_page(). We try to drop those buffers here
877         * and if that worked, and the page is no longer mapped into
878         * process address space (page_count == 1) it can be freed.
879         * Otherwise, leave the page on the LRU so it is swappable.
880         */
881        if (page_has_private(page)) {
882            if (!try_to_release_page(page, sc->gfp_mask))
883                goto activate_locked;
884            if (!mapping && page_count(page) == 1) {
885                unlock_page(page);
886                if (put_page_testzero(page))
887                    goto free_it;
888                else {
889                    /*
890                     * rare race with speculative reference.
891                     * the speculative reference will free
892                     * this page shortly, so we may
893                     * increment nr_reclaimed here (and
894                     * leave it off the LRU).
895                     */
896                    nr_reclaimed++;
897                    continue;
898                }
899            }
900        }
901
902        if (!mapping || !__remove_mapping(mapping, page))
903            goto keep_locked;
904
905        /*
906         * At this point, we have no other references and there is
907         * no way to pick any more up (removed from LRU, removed
908         * from pagecache). Can use non-atomic bitops now (and
909         * we obviously don't have to worry about waking up a process
910         * waiting on the page lock, because there are no references.
911         */
912        __clear_page_locked(page);
913free_it:
914        nr_reclaimed++;
915
916        /*
917         * Is there need to periodically free_page_list? It would
918         * appear not as the counts should be low
919         */
920        list_add(&page->lru, &free_pages);
921        continue;
922
923cull_mlocked:
924        if (PageSwapCache(page))
925            try_to_free_swap(page);
926        unlock_page(page);
927        putback_lru_page(page);
928        continue;
929
930activate_locked:
931        /* Not a candidate for swapping, so reclaim swap space. */
932        if (PageSwapCache(page) && vm_swap_full())
933            try_to_free_swap(page);
934        VM_BUG_ON(PageActive(page));
935        SetPageActive(page);
936        pgactivate++;
937keep_locked:
938        unlock_page(page);
939keep:
940        list_add(&page->lru, &ret_pages);
941        VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
942    }
943
944    /*
945     * Tag a zone as congested if all the dirty pages encountered were
946     * backed by a congested BDI. In this case, reclaimers should just
947     * back off and wait for congestion to clear because further reclaim
948     * will encounter the same problem
949     */
950    if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
951        zone_set_flag(zone, ZONE_CONGESTED);
952
953    free_hot_cold_page_list(&free_pages, 1);
954
955    list_splice(&ret_pages, page_list);
956    count_vm_events(PGACTIVATE, pgactivate);
957    mem_cgroup_uncharge_end();
958    *ret_nr_dirty += nr_dirty;
959    *ret_nr_writeback += nr_writeback;
960    return nr_reclaimed;
961}
962
963/*
964 * Attempt to remove the specified page from its LRU. Only take this page
965 * if it is of the appropriate PageActive status. Pages which are being
966 * freed elsewhere are also ignored.
967 *
968 * page: page to consider
969 * mode: one of the LRU isolation modes defined above
970 *
971 * returns 0 on success, -ve errno on failure.
972 */
973int __isolate_lru_page(struct page *page, isolate_mode_t mode)
974{
975    int ret = -EINVAL;
976
977    /* Only take pages on the LRU. */
978    if (!PageLRU(page))
979        return ret;
980
981    /* Do not give back unevictable pages for compaction */
982    if (PageUnevictable(page))
983        return ret;
984
985    ret = -EBUSY;
986
987    /*
988     * To minimise LRU disruption, the caller can indicate that it only
989     * wants to isolate pages it will be able to operate on without
990     * blocking - clean pages for the most part.
991     *
992     * ISOLATE_CLEAN means that only clean pages should be isolated. This
993     * is used by reclaim when it is cannot write to backing storage
994     *
995     * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
996     * that it is possible to migrate without blocking
997     */
998    if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
999        /* All the caller can do on PageWriteback is block */
1000        if (PageWriteback(page))
1001            return ret;
1002
1003        if (PageDirty(page)) {
1004            struct address_space *mapping;
1005
1006            /* ISOLATE_CLEAN means only clean pages */
1007            if (mode & ISOLATE_CLEAN)
1008                return ret;
1009
1010            /*
1011             * Only pages without mappings or that have a
1012             * ->migratepage callback are possible to migrate
1013             * without blocking
1014             */
1015            mapping = page_mapping(page);
1016            if (mapping && !mapping->a_ops->migratepage)
1017                return ret;
1018        }
1019    }
1020
1021    if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1022        return ret;
1023
1024    if (likely(get_page_unless_zero(page))) {
1025        /*
1026         * Be careful not to clear PageLRU until after we're
1027         * sure the page is not being freed elsewhere -- the
1028         * page release code relies on it.
1029         */
1030        ClearPageLRU(page);
1031        ret = 0;
1032    }
1033
1034    return ret;
1035}
1036
1037/*
1038 * zone->lru_lock is heavily contended. Some of the functions that
1039 * shrink the lists perform better by taking out a batch of pages
1040 * and working on them outside the LRU lock.
1041 *
1042 * For pagecache intensive workloads, this function is the hottest
1043 * spot in the kernel (apart from copy_*_user functions).
1044 *
1045 * Appropriate locks must be held before calling this function.
1046 *
1047 * @nr_to_scan: The number of pages to look through on the list.
1048 * @lruvec: The LRU vector to pull pages from.
1049 * @dst: The temp list to put pages on to.
1050 * @nr_scanned: The number of pages that were scanned.
1051 * @sc: The scan_control struct for this reclaim session
1052 * @mode: One of the LRU isolation modes
1053 * @lru: LRU list id for isolating
1054 *
1055 * returns how many pages were moved onto *@dst.
1056 */
1057static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1058        struct lruvec *lruvec, struct list_head *dst,
1059        unsigned long *nr_scanned, struct scan_control *sc,
1060        isolate_mode_t mode, enum lru_list lru)
1061{
1062    struct list_head *src = &lruvec->lists[lru];
1063    unsigned long nr_taken = 0;
1064    unsigned long scan;
1065
1066    for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1067        struct page *page;
1068        int nr_pages;
1069
1070        page = lru_to_page(src);
1071        prefetchw_prev_lru_page(page, src, flags);
1072
1073        VM_BUG_ON(!PageLRU(page));
1074
1075        switch (__isolate_lru_page(page, mode)) {
1076        case 0:
1077            nr_pages = hpage_nr_pages(page);
1078            mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
1079            list_move(&page->lru, dst);
1080            nr_taken += nr_pages;
1081            break;
1082
1083        case -EBUSY:
1084            /* else it is being freed elsewhere */
1085            list_move(&page->lru, src);
1086            continue;
1087
1088        default:
1089            BUG();
1090        }
1091    }
1092
1093    *nr_scanned = scan;
1094    trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1095                    nr_taken, mode, is_file_lru(lru));
1096    return nr_taken;
1097}
1098
1099/**
1100 * isolate_lru_page - tries to isolate a page from its LRU list
1101 * @page: page to isolate from its LRU list
1102 *
1103 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1104 * vmstat statistic corresponding to whatever LRU list the page was on.
1105 *
1106 * Returns 0 if the page was removed from an LRU list.
1107 * Returns -EBUSY if the page was not on an LRU list.
1108 *
1109 * The returned page will have PageLRU() cleared. If it was found on
1110 * the active list, it will have PageActive set. If it was found on
1111 * the unevictable list, it will have the PageUnevictable bit set. That flag
1112 * may need to be cleared by the caller before letting the page go.
1113 *
1114 * The vmstat statistic corresponding to the list on which the page was
1115 * found will be decremented.
1116 *
1117 * Restrictions:
1118 * (1) Must be called with an elevated refcount on the page. This is a
1119 * fundamentnal difference from isolate_lru_pages (which is called
1120 * without a stable reference).
1121 * (2) the lru_lock must not be held.
1122 * (3) interrupts must be enabled.
1123 */
1124int isolate_lru_page(struct page *page)
1125{
1126    int ret = -EBUSY;
1127
1128    VM_BUG_ON(!page_count(page));
1129
1130    if (PageLRU(page)) {
1131        struct zone *zone = page_zone(page);
1132        struct lruvec *lruvec;
1133
1134        spin_lock_irq(&zone->lru_lock);
1135        lruvec = mem_cgroup_page_lruvec(page, zone);
1136        if (PageLRU(page)) {
1137            int lru = page_lru(page);
1138            get_page(page);
1139            ClearPageLRU(page);
1140            del_page_from_lru_list(page, lruvec, lru);
1141            ret = 0;
1142        }
1143        spin_unlock_irq(&zone->lru_lock);
1144    }
1145    return ret;
1146}
1147
1148/*
1149 * Are there way too many processes in the direct reclaim path already?
1150 */
1151static int too_many_isolated(struct zone *zone, int file,
1152        struct scan_control *sc)
1153{
1154    unsigned long inactive, isolated;
1155
1156    if (current_is_kswapd())
1157        return 0;
1158
1159    if (!global_reclaim(sc))
1160        return 0;
1161
1162    if (file) {
1163        inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1164        isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1165    } else {
1166        inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1167        isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1168    }
1169
1170    return isolated > inactive;
1171}
1172
1173static noinline_for_stack void
1174putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1175{
1176    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1177    struct zone *zone = lruvec_zone(lruvec);
1178    LIST_HEAD(pages_to_free);
1179
1180    /*
1181     * Put back any unfreeable pages.
1182     */
1183    while (!list_empty(page_list)) {
1184        struct page *page = lru_to_page(page_list);
1185        int lru;
1186
1187        VM_BUG_ON(PageLRU(page));
1188        list_del(&page->lru);
1189        if (unlikely(!page_evictable(page, NULL))) {
1190            spin_unlock_irq(&zone->lru_lock);
1191            putback_lru_page(page);
1192            spin_lock_irq(&zone->lru_lock);
1193            continue;
1194        }
1195
1196        lruvec = mem_cgroup_page_lruvec(page, zone);
1197
1198        SetPageLRU(page);
1199        lru = page_lru(page);
1200        add_page_to_lru_list(page, lruvec, lru);
1201
1202        if (is_active_lru(lru)) {
1203            int file = is_file_lru(lru);
1204            int numpages = hpage_nr_pages(page);
1205            reclaim_stat->recent_rotated[file] += numpages;
1206        }
1207        if (put_page_testzero(page)) {
1208            __ClearPageLRU(page);
1209            __ClearPageActive(page);
1210            del_page_from_lru_list(page, lruvec, lru);
1211
1212            if (unlikely(PageCompound(page))) {
1213                spin_unlock_irq(&zone->lru_lock);
1214                (*get_compound_page_dtor(page))(page);
1215                spin_lock_irq(&zone->lru_lock);
1216            } else
1217                list_add(&page->lru, &pages_to_free);
1218        }
1219    }
1220
1221    /*
1222     * To save our caller's stack, now use input list for pages to free.
1223     */
1224    list_splice(&pages_to_free, page_list);
1225}
1226
1227/*
1228 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1229 * of reclaimed pages
1230 */
1231static noinline_for_stack unsigned long
1232shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1233             struct scan_control *sc, enum lru_list lru)
1234{
1235    LIST_HEAD(page_list);
1236    unsigned long nr_scanned;
1237    unsigned long nr_reclaimed = 0;
1238    unsigned long nr_taken;
1239    unsigned long nr_dirty = 0;
1240    unsigned long nr_writeback = 0;
1241    isolate_mode_t isolate_mode = 0;
1242    int file = is_file_lru(lru);
1243    struct zone *zone = lruvec_zone(lruvec);
1244    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1245
1246    while (unlikely(too_many_isolated(zone, file, sc))) {
1247        congestion_wait(BLK_RW_ASYNC, HZ/10);
1248
1249        /* We are about to die and free our memory. Return now. */
1250        if (fatal_signal_pending(current))
1251            return SWAP_CLUSTER_MAX;
1252    }
1253
1254    lru_add_drain();
1255
1256    if (!sc->may_unmap)
1257        isolate_mode |= ISOLATE_UNMAPPED;
1258    if (!sc->may_writepage)
1259        isolate_mode |= ISOLATE_CLEAN;
1260
1261    spin_lock_irq(&zone->lru_lock);
1262
1263    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1264                     &nr_scanned, sc, isolate_mode, lru);
1265
1266    __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1267    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1268
1269    if (global_reclaim(sc)) {
1270        zone->pages_scanned += nr_scanned;
1271        if (current_is_kswapd())
1272            __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1273        else
1274            __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1275    }
1276    spin_unlock_irq(&zone->lru_lock);
1277
1278    if (nr_taken == 0)
1279        return 0;
1280
1281    nr_reclaimed = shrink_page_list(&page_list, zone, sc,
1282                        &nr_dirty, &nr_writeback);
1283
1284    spin_lock_irq(&zone->lru_lock);
1285
1286    reclaim_stat->recent_scanned[file] += nr_taken;
1287
1288    if (global_reclaim(sc)) {
1289        if (current_is_kswapd())
1290            __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1291                           nr_reclaimed);
1292        else
1293            __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1294                           nr_reclaimed);
1295    }
1296
1297    putback_inactive_pages(lruvec, &page_list);
1298
1299    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1300
1301    spin_unlock_irq(&zone->lru_lock);
1302
1303    free_hot_cold_page_list(&page_list, 1);
1304
1305    /*
1306     * If reclaim is isolating dirty pages under writeback, it implies
1307     * that the long-lived page allocation rate is exceeding the page
1308     * laundering rate. Either the global limits are not being effective
1309     * at throttling processes due to the page distribution throughout
1310     * zones or there is heavy usage of a slow backing device. The
1311     * only option is to throttle from reclaim context which is not ideal
1312     * as there is no guarantee the dirtying process is throttled in the
1313     * same way balance_dirty_pages() manages.
1314     *
1315     * This scales the number of dirty pages that must be under writeback
1316     * before throttling depending on priority. It is a simple backoff
1317     * function that has the most effect in the range DEF_PRIORITY to
1318     * DEF_PRIORITY-2 which is the priority reclaim is considered to be
1319     * in trouble and reclaim is considered to be in trouble.
1320     *
1321     * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle
1322     * DEF_PRIORITY-1 50% must be PageWriteback
1323     * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble
1324     * ...
1325     * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
1326     * isolated page is PageWriteback
1327     */
1328    if (nr_writeback && nr_writeback >=
1329            (nr_taken >> (DEF_PRIORITY - sc->priority)))
1330        wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1331
1332    trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1333        zone_idx(zone),
1334        nr_scanned, nr_reclaimed,
1335        sc->priority,
1336        trace_shrink_flags(file));
1337    return nr_reclaimed;
1338}
1339
1340/*
1341 * This moves pages from the active list to the inactive list.
1342 *
1343 * We move them the other way if the page is referenced by one or more
1344 * processes, from rmap.
1345 *
1346 * If the pages are mostly unmapped, the processing is fast and it is
1347 * appropriate to hold zone->lru_lock across the whole operation. But if
1348 * the pages are mapped, the processing is slow (page_referenced()) so we
1349 * should drop zone->lru_lock around each page. It's impossible to balance
1350 * this, so instead we remove the pages from the LRU while processing them.
1351 * It is safe to rely on PG_active against the non-LRU pages in here because
1352 * nobody will play with that bit on a non-LRU page.
1353 *
1354 * The downside is that we have to touch page->_count against each page.
1355 * But we had to alter page->flags anyway.
1356 */
1357
1358static void move_active_pages_to_lru(struct lruvec *lruvec,
1359                     struct list_head *list,
1360                     struct list_head *pages_to_free,
1361                     enum lru_list lru)
1362{
1363    struct zone *zone = lruvec_zone(lruvec);
1364    unsigned long pgmoved = 0;
1365    struct page *page;
1366    int nr_pages;
1367
1368    while (!list_empty(list)) {
1369        page = lru_to_page(list);
1370        lruvec = mem_cgroup_page_lruvec(page, zone);
1371
1372        VM_BUG_ON(PageLRU(page));
1373        SetPageLRU(page);
1374
1375        nr_pages = hpage_nr_pages(page);
1376        mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
1377        list_move(&page->lru, &lruvec->lists[lru]);
1378        pgmoved += nr_pages;
1379
1380        if (put_page_testzero(page)) {
1381            __ClearPageLRU(page);
1382            __ClearPageActive(page);
1383            del_page_from_lru_list(page, lruvec, lru);
1384
1385            if (unlikely(PageCompound(page))) {
1386                spin_unlock_irq(&zone->lru_lock);
1387                (*get_compound_page_dtor(page))(page);
1388                spin_lock_irq(&zone->lru_lock);
1389            } else
1390                list_add(&page->lru, pages_to_free);
1391        }
1392    }
1393    __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1394    if (!is_active_lru(lru))
1395        __count_vm_events(PGDEACTIVATE, pgmoved);
1396}
1397
1398static void shrink_active_list(unsigned long nr_to_scan,
1399                   struct lruvec *lruvec,
1400                   struct scan_control *sc,
1401                   enum lru_list lru)
1402{
1403    unsigned long nr_taken;
1404    unsigned long nr_scanned;
1405    unsigned long vm_flags;
1406    LIST_HEAD(l_hold); /* The pages which were snipped off */
1407    LIST_HEAD(l_active);
1408    LIST_HEAD(l_inactive);
1409    struct page *page;
1410    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1411    unsigned long nr_rotated = 0;
1412    isolate_mode_t isolate_mode = 0;
1413    int file = is_file_lru(lru);
1414    struct zone *zone = lruvec_zone(lruvec);
1415
1416    lru_add_drain();
1417
1418    if (!sc->may_unmap)
1419        isolate_mode |= ISOLATE_UNMAPPED;
1420    if (!sc->may_writepage)
1421        isolate_mode |= ISOLATE_CLEAN;
1422
1423    spin_lock_irq(&zone->lru_lock);
1424
1425    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1426                     &nr_scanned, sc, isolate_mode, lru);
1427    if (global_reclaim(sc))
1428        zone->pages_scanned += nr_scanned;
1429
1430    reclaim_stat->recent_scanned[file] += nr_taken;
1431
1432    __count_zone_vm_events(PGREFILL, zone, nr_scanned);
1433    __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1434    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1435    spin_unlock_irq(&zone->lru_lock);
1436
1437    while (!list_empty(&l_hold)) {
1438        cond_resched();
1439        page = lru_to_page(&l_hold);
1440        list_del(&page->lru);
1441
1442        if (unlikely(!page_evictable(page, NULL))) {
1443            putback_lru_page(page);
1444            continue;
1445        }
1446
1447        if (unlikely(buffer_heads_over_limit)) {
1448            if (page_has_private(page) && trylock_page(page)) {
1449                if (page_has_private(page))
1450                    try_to_release_page(page, 0);
1451                unlock_page(page);
1452            }
1453        }
1454
1455        if (page_referenced(page, 0, sc->target_mem_cgroup,
1456                    &vm_flags)) {
1457            nr_rotated += hpage_nr_pages(page);
1458            /*
1459             * Identify referenced, file-backed active pages and
1460             * give them one more trip around the active list. So
1461             * that executable code get better chances to stay in
1462             * memory under moderate memory pressure. Anon pages
1463             * are not likely to be evicted by use-once streaming
1464             * IO, plus JVM can create lots of anon VM_EXEC pages,
1465             * so we ignore them here.
1466             */
1467            if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1468                list_add(&page->lru, &l_active);
1469                continue;
1470            }
1471        }
1472
1473        ClearPageActive(page); /* we are de-activating */
1474        list_add(&page->lru, &l_inactive);
1475    }
1476
1477    /*
1478     * Move pages back to the lru list.
1479     */
1480    spin_lock_irq(&zone->lru_lock);
1481    /*
1482     * Count referenced pages from currently used mappings as rotated,
1483     * even though only some of them are actually re-activated. This
1484     * helps balance scan pressure between file and anonymous pages in
1485     * get_scan_ratio.
1486     */
1487    reclaim_stat->recent_rotated[file] += nr_rotated;
1488
1489    move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1490    move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1491    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1492    spin_unlock_irq(&zone->lru_lock);
1493
1494    free_hot_cold_page_list(&l_hold, 1);
1495}
1496
1497#ifdef CONFIG_SWAP
1498static int inactive_anon_is_low_global(struct zone *zone)
1499{
1500    unsigned long active, inactive;
1501
1502    active = zone_page_state(zone, NR_ACTIVE_ANON);
1503    inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1504
1505    if (inactive * zone->inactive_ratio < active)
1506        return 1;
1507
1508    return 0;
1509}
1510
1511/**
1512 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1513 * @lruvec: LRU vector to check
1514 *
1515 * Returns true if the zone does not have enough inactive anon pages,
1516 * meaning some active anon pages need to be deactivated.
1517 */
1518static int inactive_anon_is_low(struct lruvec *lruvec)
1519{
1520    /*
1521     * If we don't have swap space, anonymous page deactivation
1522     * is pointless.
1523     */
1524    if (!total_swap_pages)
1525        return 0;
1526
1527    if (!mem_cgroup_disabled())
1528        return mem_cgroup_inactive_anon_is_low(lruvec);
1529
1530    return inactive_anon_is_low_global(lruvec_zone(lruvec));
1531}
1532#else
1533static inline int inactive_anon_is_low(struct lruvec *lruvec)
1534{
1535    return 0;
1536}
1537#endif
1538
1539static int inactive_file_is_low_global(struct zone *zone)
1540{
1541    unsigned long active, inactive;
1542
1543    active = zone_page_state(zone, NR_ACTIVE_FILE);
1544    inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1545
1546    return (active > inactive);
1547}
1548
1549/**
1550 * inactive_file_is_low - check if file pages need to be deactivated
1551 * @lruvec: LRU vector to check
1552 *
1553 * When the system is doing streaming IO, memory pressure here
1554 * ensures that active file pages get deactivated, until more
1555 * than half of the file pages are on the inactive list.
1556 *
1557 * Once we get to that situation, protect the system's working
1558 * set from being evicted by disabling active file page aging.
1559 *
1560 * This uses a different ratio than the anonymous pages, because
1561 * the page cache uses a use-once replacement algorithm.
1562 */
1563static int inactive_file_is_low(struct lruvec *lruvec)
1564{
1565    if (!mem_cgroup_disabled())
1566        return mem_cgroup_inactive_file_is_low(lruvec);
1567
1568    return inactive_file_is_low_global(lruvec_zone(lruvec));
1569}
1570
1571static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1572{
1573    if (is_file_lru(lru))
1574        return inactive_file_is_low(lruvec);
1575    else
1576        return inactive_anon_is_low(lruvec);
1577}
1578
1579static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1580                 struct lruvec *lruvec, struct scan_control *sc)
1581{
1582    if (is_active_lru(lru)) {
1583        if (inactive_list_is_low(lruvec, lru))
1584            shrink_active_list(nr_to_scan, lruvec, sc, lru);
1585        return 0;
1586    }
1587
1588    return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1589}
1590
1591static int vmscan_swappiness(struct scan_control *sc)
1592{
1593    if (global_reclaim(sc))
1594        return vm_swappiness;
1595    return mem_cgroup_swappiness(sc->target_mem_cgroup);
1596}
1597
1598/*
1599 * Determine how aggressively the anon and file LRU lists should be
1600 * scanned. The relative value of each set of LRU lists is determined
1601 * by looking at the fraction of the pages scanned we did rotate back
1602 * onto the active list instead of evict.
1603 *
1604 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1605 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
1606 */
1607static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1608               unsigned long *nr)
1609{
1610    unsigned long anon, file, free;
1611    unsigned long anon_prio, file_prio;
1612    unsigned long ap, fp;
1613    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1614    u64 fraction[2], denominator;
1615    enum lru_list lru;
1616    int noswap = 0;
1617    bool force_scan = false;
1618    struct zone *zone = lruvec_zone(lruvec);
1619
1620    /*
1621     * If the zone or memcg is small, nr[l] can be 0. This
1622     * results in no scanning on this priority and a potential
1623     * priority drop. Global direct reclaim can go to the next
1624     * zone and tends to have no problems. Global kswapd is for
1625     * zone balancing and it needs to scan a minimum amount. When
1626     * reclaiming for a memcg, a priority drop can cause high
1627     * latencies, so it's better to scan a minimum amount there as
1628     * well.
1629     */
1630    if (current_is_kswapd() && zone->all_unreclaimable)
1631        force_scan = true;
1632    if (!global_reclaim(sc))
1633        force_scan = true;
1634
1635    /* If we have no swap space, do not bother scanning anon pages. */
1636    if (!sc->may_swap || (nr_swap_pages <= 0)) {
1637        noswap = 1;
1638        fraction[0] = 0;
1639        fraction[1] = 1;
1640        denominator = 1;
1641        goto out;
1642    }
1643
1644    anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1645        get_lru_size(lruvec, LRU_INACTIVE_ANON);
1646    file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1647        get_lru_size(lruvec, LRU_INACTIVE_FILE);
1648
1649    if (global_reclaim(sc)) {
1650        free = zone_page_state(zone, NR_FREE_PAGES);
1651        /* If we have very few page cache pages,
1652           force-scan anon pages. */
1653        if (unlikely(file + free <= high_wmark_pages(zone))) {
1654            fraction[0] = 1;
1655            fraction[1] = 0;
1656            denominator = 1;
1657            goto out;
1658        }
1659    }
1660
1661    /*
1662     * With swappiness at 100, anonymous and file have the same priority.
1663     * This scanning priority is essentially the inverse of IO cost.
1664     */
1665    anon_prio = vmscan_swappiness(sc);
1666    file_prio = 200 - anon_prio;
1667
1668    /*
1669     * OK, so we have swap space and a fair amount of page cache
1670     * pages. We use the recently rotated / recently scanned
1671     * ratios to determine how valuable each cache is.
1672     *
1673     * Because workloads change over time (and to avoid overflow)
1674     * we keep these statistics as a floating average, which ends
1675     * up weighing recent references more than old ones.
1676     *
1677     * anon in [0], file in [1]
1678     */
1679    spin_lock_irq(&zone->lru_lock);
1680    if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1681        reclaim_stat->recent_scanned[0] /= 2;
1682        reclaim_stat->recent_rotated[0] /= 2;
1683    }
1684
1685    if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1686        reclaim_stat->recent_scanned[1] /= 2;
1687        reclaim_stat->recent_rotated[1] /= 2;
1688    }
1689
1690    /*
1691     * The amount of pressure on anon vs file pages is inversely
1692     * proportional to the fraction of recently scanned pages on
1693     * each list that were recently referenced and in active use.
1694     */
1695    ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
1696    ap /= reclaim_stat->recent_rotated[0] + 1;
1697
1698    fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
1699    fp /= reclaim_stat->recent_rotated[1] + 1;
1700    spin_unlock_irq(&zone->lru_lock);
1701
1702    fraction[0] = ap;
1703    fraction[1] = fp;
1704    denominator = ap + fp + 1;
1705out:
1706    for_each_evictable_lru(lru) {
1707        int file = is_file_lru(lru);
1708        unsigned long scan;
1709
1710        scan = get_lru_size(lruvec, lru);
1711        if (sc->priority || noswap || !vmscan_swappiness(sc)) {
1712            scan >>= sc->priority;
1713            if (!scan && force_scan)
1714                scan = SWAP_CLUSTER_MAX;
1715            scan = div64_u64(scan * fraction[file], denominator);
1716        }
1717        nr[lru] = scan;
1718    }
1719}
1720
1721/* Use reclaim/compaction for costly allocs or under memory pressure */
1722static bool in_reclaim_compaction(struct scan_control *sc)
1723{
1724    if (COMPACTION_BUILD && sc->order &&
1725            (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
1726             sc->priority < DEF_PRIORITY - 2))
1727        return true;
1728
1729    return false;
1730}
1731
1732/*
1733 * Reclaim/compaction is used for high-order allocation requests. It reclaims
1734 * order-0 pages before compacting the zone. should_continue_reclaim() returns
1735 * true if more pages should be reclaimed such that when the page allocator
1736 * calls try_to_compact_zone() that it will have enough free pages to succeed.
1737 * It will give up earlier than that if there is difficulty reclaiming pages.
1738 */
1739static inline bool should_continue_reclaim(struct lruvec *lruvec,
1740                    unsigned long nr_reclaimed,
1741                    unsigned long nr_scanned,
1742                    struct scan_control *sc)
1743{
1744    unsigned long pages_for_compaction;
1745    unsigned long inactive_lru_pages;
1746
1747    /* If not in reclaim/compaction mode, stop */
1748    if (!in_reclaim_compaction(sc))
1749        return false;
1750
1751    /* Consider stopping depending on scan and reclaim activity */
1752    if (sc->gfp_mask & __GFP_REPEAT) {
1753        /*
1754         * For __GFP_REPEAT allocations, stop reclaiming if the
1755         * full LRU list has been scanned and we are still failing
1756         * to reclaim pages. This full LRU scan is potentially
1757         * expensive but a __GFP_REPEAT caller really wants to succeed
1758         */
1759        if (!nr_reclaimed && !nr_scanned)
1760            return false;
1761    } else {
1762        /*
1763         * For non-__GFP_REPEAT allocations which can presumably
1764         * fail without consequence, stop if we failed to reclaim
1765         * any pages from the last SWAP_CLUSTER_MAX number of
1766         * pages that were scanned. This will return to the
1767         * caller faster at the risk reclaim/compaction and
1768         * the resulting allocation attempt fails
1769         */
1770        if (!nr_reclaimed)
1771            return false;
1772    }
1773
1774    /*
1775     * If we have not reclaimed enough pages for compaction and the
1776     * inactive lists are large enough, continue reclaiming
1777     */
1778    pages_for_compaction = (2UL << sc->order);
1779    inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1780    if (nr_swap_pages > 0)
1781        inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
1782    if (sc->nr_reclaimed < pages_for_compaction &&
1783            inactive_lru_pages > pages_for_compaction)
1784        return true;
1785
1786    /* If compaction would go ahead or the allocation would succeed, stop */
1787    switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
1788    case COMPACT_PARTIAL:
1789    case COMPACT_CONTINUE:
1790        return false;
1791    default:
1792        return true;
1793    }
1794}
1795
1796/*
1797 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1798 */
1799static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
1800{
1801    unsigned long nr[NR_LRU_LISTS];
1802    unsigned long nr_to_scan;
1803    enum lru_list lru;
1804    unsigned long nr_reclaimed, nr_scanned;
1805    unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1806    struct blk_plug plug;
1807
1808restart:
1809    nr_reclaimed = 0;
1810    nr_scanned = sc->nr_scanned;
1811    get_scan_count(lruvec, sc, nr);
1812
1813    blk_start_plug(&plug);
1814    while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1815                    nr[LRU_INACTIVE_FILE]) {
1816        for_each_evictable_lru(lru) {
1817            if (nr[lru]) {
1818                nr_to_scan = min_t(unsigned long,
1819                           nr[lru], SWAP_CLUSTER_MAX);
1820                nr[lru] -= nr_to_scan;
1821
1822                nr_reclaimed += shrink_list(lru, nr_to_scan,
1823                                lruvec, sc);
1824            }
1825        }
1826        /*
1827         * On large memory systems, scan >> priority can become
1828         * really large. This is fine for the starting priority;
1829         * we want to put equal scanning pressure on each zone.
1830         * However, if the VM has a harder time of freeing pages,
1831         * with multiple processes reclaiming pages, the total
1832         * freeing target can get unreasonably large.
1833         */
1834        if (nr_reclaimed >= nr_to_reclaim &&
1835            sc->priority < DEF_PRIORITY)
1836            break;
1837    }
1838    blk_finish_plug(&plug);
1839    sc->nr_reclaimed += nr_reclaimed;
1840
1841    /*
1842     * Even if we did not try to evict anon pages at all, we want to
1843     * rebalance the anon lru active/inactive ratio.
1844     */
1845    if (inactive_anon_is_low(lruvec))
1846        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
1847                   sc, LRU_ACTIVE_ANON);
1848
1849    /* reclaim/compaction might need reclaim to continue */
1850    if (should_continue_reclaim(lruvec, nr_reclaimed,
1851                    sc->nr_scanned - nr_scanned, sc))
1852        goto restart;
1853
1854    throttle_vm_writeout(sc->gfp_mask);
1855}
1856
1857static void shrink_zone(struct zone *zone, struct scan_control *sc)
1858{
1859    struct mem_cgroup *root = sc->target_mem_cgroup;
1860    struct mem_cgroup_reclaim_cookie reclaim = {
1861        .zone = zone,
1862        .priority = sc->priority,
1863    };
1864    struct mem_cgroup *memcg;
1865
1866    memcg = mem_cgroup_iter(root, NULL, &reclaim);
1867    do {
1868        struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
1869
1870        shrink_lruvec(lruvec, sc);
1871
1872        /*
1873         * Limit reclaim has historically picked one memcg and
1874         * scanned it with decreasing priority levels until
1875         * nr_to_reclaim had been reclaimed. This priority
1876         * cycle is thus over after a single memcg.
1877         *
1878         * Direct reclaim and kswapd, on the other hand, have
1879         * to scan all memory cgroups to fulfill the overall
1880         * scan target for the zone.
1881         */
1882        if (!global_reclaim(sc)) {
1883            mem_cgroup_iter_break(root, memcg);
1884            break;
1885        }
1886        memcg = mem_cgroup_iter(root, memcg, &reclaim);
1887    } while (memcg);
1888}
1889
1890/* Returns true if compaction should go ahead for a high-order request */
1891static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
1892{
1893    unsigned long balance_gap, watermark;
1894    bool watermark_ok;
1895
1896    /* Do not consider compaction for orders reclaim is meant to satisfy */
1897    if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
1898        return false;
1899
1900    /*
1901     * Compaction takes time to run and there are potentially other
1902     * callers using the pages just freed. Continue reclaiming until
1903     * there is a buffer of free pages available to give compaction
1904     * a reasonable chance of completing and allocating the page
1905     */
1906    balance_gap = min(low_wmark_pages(zone),
1907        (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
1908            KSWAPD_ZONE_BALANCE_GAP_RATIO);
1909    watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
1910    watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
1911
1912    /*
1913     * If compaction is deferred, reclaim up to a point where
1914     * compaction will have a chance of success when re-enabled
1915     */
1916    if (compaction_deferred(zone, sc->order))
1917        return watermark_ok;
1918
1919    /* If compaction is not ready to start, keep reclaiming */
1920    if (!compaction_suitable(zone, sc->order))
1921        return false;
1922
1923    return watermark_ok;
1924}
1925
1926/*
1927 * This is the direct reclaim path, for page-allocating processes. We only
1928 * try to reclaim pages from zones which will satisfy the caller's allocation
1929 * request.
1930 *
1931 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1932 * Because:
1933 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1934 * allocation or
1935 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1936 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1937 * zone defense algorithm.
1938 *
1939 * If a zone is deemed to be full of pinned pages then just give it a light
1940 * scan then give up on it.
1941 *
1942 * This function returns true if a zone is being reclaimed for a costly
1943 * high-order allocation and compaction is ready to begin. This indicates to
1944 * the caller that it should consider retrying the allocation instead of
1945 * further reclaim.
1946 */
1947static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
1948{
1949    struct zoneref *z;
1950    struct zone *zone;
1951    unsigned long nr_soft_reclaimed;
1952    unsigned long nr_soft_scanned;
1953    bool aborted_reclaim = false;
1954
1955    /*
1956     * If the number of buffer_heads in the machine exceeds the maximum
1957     * allowed level, force direct reclaim to scan the highmem zone as
1958     * highmem pages could be pinning lowmem pages storing buffer_heads
1959     */
1960    if (buffer_heads_over_limit)
1961        sc->gfp_mask |= __GFP_HIGHMEM;
1962
1963    for_each_zone_zonelist_nodemask(zone, z, zonelist,
1964                    gfp_zone(sc->gfp_mask), sc->nodemask) {
1965        if (!populated_zone(zone))
1966            continue;
1967        /*
1968         * Take care memory controller reclaiming has small influence
1969         * to global LRU.
1970         */
1971        if (global_reclaim(sc)) {
1972            if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1973                continue;
1974            if (zone->all_unreclaimable &&
1975                    sc->priority != DEF_PRIORITY)
1976                continue; /* Let kswapd poll it */
1977            if (COMPACTION_BUILD) {
1978                /*
1979                 * If we already have plenty of memory free for
1980                 * compaction in this zone, don't free any more.
1981                 * Even though compaction is invoked for any
1982                 * non-zero order, only frequent costly order
1983                 * reclamation is disruptive enough to become a
1984                 * noticeable problem, like transparent huge
1985                 * page allocations.
1986                 */
1987                if (compaction_ready(zone, sc)) {
1988                    aborted_reclaim = true;
1989                    continue;
1990                }
1991            }
1992            /*
1993             * This steals pages from memory cgroups over softlimit
1994             * and returns the number of reclaimed pages and
1995             * scanned pages. This works for global memory pressure
1996             * and balancing, not for a memcg's limit.
1997             */
1998            nr_soft_scanned = 0;
1999            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2000                        sc->order, sc->gfp_mask,
2001                        &nr_soft_scanned);
2002            sc->nr_reclaimed += nr_soft_reclaimed;
2003            sc->nr_scanned += nr_soft_scanned;
2004            /* need some check for avoid more shrink_zone() */
2005        }
2006
2007        shrink_zone(zone, sc);
2008    }
2009
2010    return aborted_reclaim;
2011}
2012
2013static bool zone_reclaimable(struct zone *zone)
2014{
2015    return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
2016}
2017
2018/* All zones in zonelist are unreclaimable? */
2019static bool all_unreclaimable(struct zonelist *zonelist,
2020        struct scan_control *sc)
2021{
2022    struct zoneref *z;
2023    struct zone *zone;
2024
2025    for_each_zone_zonelist_nodemask(zone, z, zonelist,
2026            gfp_zone(sc->gfp_mask), sc->nodemask) {
2027        if (!populated_zone(zone))
2028            continue;
2029        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2030            continue;
2031        if (!zone->all_unreclaimable)
2032            return false;
2033    }
2034
2035    return true;
2036}
2037
2038/*
2039 * This is the main entry point to direct page reclaim.
2040 *
2041 * If a full scan of the inactive list fails to free enough memory then we
2042 * are "out of memory" and something needs to be killed.
2043 *
2044 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2045 * high - the zone may be full of dirty or under-writeback pages, which this
2046 * caller can't do much about. We kick the writeback threads and take explicit
2047 * naps in the hope that some of these pages can be written. But if the
2048 * allocating task holds filesystem locks which prevent writeout this might not
2049 * work, and the allocation attempt will fail.
2050 *
2051 * returns: 0, if no pages reclaimed
2052 * else, the number of pages reclaimed
2053 */
2054static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2055                    struct scan_control *sc,
2056                    struct shrink_control *shrink)
2057{
2058    unsigned long total_scanned = 0;
2059    struct reclaim_state *reclaim_state = current->reclaim_state;
2060    struct zoneref *z;
2061    struct zone *zone;
2062    unsigned long writeback_threshold;
2063    bool aborted_reclaim;
2064
2065    delayacct_freepages_start();
2066
2067    if (global_reclaim(sc))
2068        count_vm_event(ALLOCSTALL);
2069
2070    do {
2071        sc->nr_scanned = 0;
2072        aborted_reclaim = shrink_zones(zonelist, sc);
2073
2074        /*
2075         * Don't shrink slabs when reclaiming memory from
2076         * over limit cgroups
2077         */
2078        if (global_reclaim(sc)) {
2079            unsigned long lru_pages = 0;
2080            for_each_zone_zonelist(zone, z, zonelist,
2081                    gfp_zone(sc->gfp_mask)) {
2082                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2083                    continue;
2084
2085                lru_pages += zone_reclaimable_pages(zone);
2086            }
2087
2088            shrink_slab(shrink, sc->nr_scanned, lru_pages);
2089            if (reclaim_state) {
2090                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2091                reclaim_state->reclaimed_slab = 0;
2092            }
2093        }
2094        total_scanned += sc->nr_scanned;
2095        if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2096            goto out;
2097
2098        /*
2099         * Try to write back as many pages as we just scanned. This
2100         * tends to cause slow streaming writers to write data to the
2101         * disk smoothly, at the dirtying rate, which is nice. But
2102         * that's undesirable in laptop mode, where we *want* lumpy
2103         * writeout. So in laptop mode, write out the whole world.
2104         */
2105        writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2106        if (total_scanned > writeback_threshold) {
2107            wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2108                        WB_REASON_TRY_TO_FREE_PAGES);
2109            sc->may_writepage = 1;
2110        }
2111
2112        /* Take a nap, wait for some writeback to complete */
2113        if (!sc->hibernation_mode && sc->nr_scanned &&
2114            sc->priority < DEF_PRIORITY - 2) {
2115            struct zone *preferred_zone;
2116
2117            first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2118                        &cpuset_current_mems_allowed,
2119                        &preferred_zone);
2120            wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2121        }
2122    } while (--sc->priority >= 0);
2123
2124out:
2125    delayacct_freepages_end();
2126
2127    if (sc->nr_reclaimed)
2128        return sc->nr_reclaimed;
2129
2130    /*
2131     * As hibernation is going on, kswapd is freezed so that it can't mark
2132     * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
2133     * check.
2134     */
2135    if (oom_killer_disabled)
2136        return 0;
2137
2138    /* Aborted reclaim to try compaction? don't OOM, then */
2139    if (aborted_reclaim)
2140        return 1;
2141
2142    /* top priority shrink_zones still had more to do? don't OOM, then */
2143    if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
2144        return 1;
2145
2146    return 0;
2147}
2148
2149static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2150{
2151    struct zone *zone;
2152    unsigned long pfmemalloc_reserve = 0;
2153    unsigned long free_pages = 0;
2154    int i;
2155    bool wmark_ok;
2156
2157    for (i = 0; i <= ZONE_NORMAL; i++) {
2158        zone = &pgdat->node_zones[i];
2159        pfmemalloc_reserve += min_wmark_pages(zone);
2160        free_pages += zone_page_state(zone, NR_FREE_PAGES);
2161    }
2162
2163    wmark_ok = free_pages > pfmemalloc_reserve / 2;
2164
2165    /* kswapd must be awake if processes are being throttled */
2166    if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2167        pgdat->classzone_idx = min(pgdat->classzone_idx,
2168                        (enum zone_type)ZONE_NORMAL);
2169        wake_up_interruptible(&pgdat->kswapd_wait);
2170    }
2171
2172    return wmark_ok;
2173}
2174
2175/*
2176 * Throttle direct reclaimers if backing storage is backed by the network
2177 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2178 * depleted. kswapd will continue to make progress and wake the processes
2179 * when the low watermark is reached
2180 */
2181static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2182                    nodemask_t *nodemask)
2183{
2184    struct zone *zone;
2185    int high_zoneidx = gfp_zone(gfp_mask);
2186    pg_data_t *pgdat;
2187
2188    /*
2189     * Kernel threads should not be throttled as they may be indirectly
2190     * responsible for cleaning pages necessary for reclaim to make forward
2191     * progress. kjournald for example may enter direct reclaim while
2192     * committing a transaction where throttling it could forcing other
2193     * processes to block on log_wait_commit().
2194     */
2195    if (current->flags & PF_KTHREAD)
2196        return;
2197
2198    /* Check if the pfmemalloc reserves are ok */
2199    first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
2200    pgdat = zone->zone_pgdat;
2201    if (pfmemalloc_watermark_ok(pgdat))
2202        return;
2203
2204    /* Account for the throttling */
2205    count_vm_event(PGSCAN_DIRECT_THROTTLE);
2206
2207    /*
2208     * If the caller cannot enter the filesystem, it's possible that it
2209     * is due to the caller holding an FS lock or performing a journal
2210     * transaction in the case of a filesystem like ext[3|4]. In this case,
2211     * it is not safe to block on pfmemalloc_wait as kswapd could be
2212     * blocked waiting on the same lock. Instead, throttle for up to a
2213     * second before continuing.
2214     */
2215    if (!(gfp_mask & __GFP_FS)) {
2216        wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2217            pfmemalloc_watermark_ok(pgdat), HZ);
2218        return;
2219    }
2220
2221    /* Throttle until kswapd wakes the process */
2222    wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2223        pfmemalloc_watermark_ok(pgdat));
2224}
2225
2226unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2227                gfp_t gfp_mask, nodemask_t *nodemask)
2228{
2229    unsigned long nr_reclaimed;
2230    struct scan_control sc = {
2231        .gfp_mask = gfp_mask,
2232        .may_writepage = !laptop_mode,
2233        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2234        .may_unmap = 1,
2235        .may_swap = 1,
2236        .order = order,
2237        .priority = DEF_PRIORITY,
2238        .target_mem_cgroup = NULL,
2239        .nodemask = nodemask,
2240    };
2241    struct shrink_control shrink = {
2242        .gfp_mask = sc.gfp_mask,
2243    };
2244
2245    throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
2246
2247    /*
2248     * Do not enter reclaim if fatal signal is pending. 1 is returned so
2249     * that the page allocator does not consider triggering OOM
2250     */
2251    if (fatal_signal_pending(current))
2252        return 1;
2253
2254    trace_mm_vmscan_direct_reclaim_begin(order,
2255                sc.may_writepage,
2256                gfp_mask);
2257
2258    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2259
2260    trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2261
2262    return nr_reclaimed;
2263}
2264
2265#ifdef CONFIG_MEMCG
2266
2267unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2268                        gfp_t gfp_mask, bool noswap,
2269                        struct zone *zone,
2270                        unsigned long *nr_scanned)
2271{
2272    struct scan_control sc = {
2273        .nr_scanned = 0,
2274        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2275        .may_writepage = !laptop_mode,
2276        .may_unmap = 1,
2277        .may_swap = !noswap,
2278        .order = 0,
2279        .priority = 0,
2280        .target_mem_cgroup = memcg,
2281    };
2282    struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2283
2284    sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2285            (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2286
2287    trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2288                              sc.may_writepage,
2289                              sc.gfp_mask);
2290
2291    /*
2292     * NOTE: Although we can get the priority field, using it
2293     * here is not a good idea, since it limits the pages we can scan.
2294     * if we don't reclaim here, the shrink_zone from balance_pgdat
2295     * will pick up pages from other mem cgroup's as well. We hack
2296     * the priority and make it zero.
2297     */
2298    shrink_lruvec(lruvec, &sc);
2299
2300    trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2301
2302    *nr_scanned = sc.nr_scanned;
2303    return sc.nr_reclaimed;
2304}
2305
2306unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2307                       gfp_t gfp_mask,
2308                       bool noswap)
2309{
2310    struct zonelist *zonelist;
2311    unsigned long nr_reclaimed;
2312    int nid;
2313    struct scan_control sc = {
2314        .may_writepage = !laptop_mode,
2315        .may_unmap = 1,
2316        .may_swap = !noswap,
2317        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2318        .order = 0,
2319        .priority = DEF_PRIORITY,
2320        .target_mem_cgroup = memcg,
2321        .nodemask = NULL, /* we don't care the placement */
2322        .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2323                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2324    };
2325    struct shrink_control shrink = {
2326        .gfp_mask = sc.gfp_mask,
2327    };
2328
2329    /*
2330     * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2331     * take care of from where we get pages. So the node where we start the
2332     * scan does not need to be the current node.
2333     */
2334    nid = mem_cgroup_select_victim_node(memcg);
2335
2336    zonelist = NODE_DATA(nid)->node_zonelists;
2337
2338    trace_mm_vmscan_memcg_reclaim_begin(0,
2339                        sc.may_writepage,
2340                        sc.gfp_mask);
2341
2342    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2343
2344    trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2345
2346    return nr_reclaimed;
2347}
2348#endif
2349
2350static void age_active_anon(struct zone *zone, struct scan_control *sc)
2351{
2352    struct mem_cgroup *memcg;
2353
2354    if (!total_swap_pages)
2355        return;
2356
2357    memcg = mem_cgroup_iter(NULL, NULL, NULL);
2358    do {
2359        struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2360
2361        if (inactive_anon_is_low(lruvec))
2362            shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2363                       sc, LRU_ACTIVE_ANON);
2364
2365        memcg = mem_cgroup_iter(NULL, memcg, NULL);
2366    } while (memcg);
2367}
2368
2369/*
2370 * pgdat_balanced is used when checking if a node is balanced for high-order
2371 * allocations. Only zones that meet watermarks and are in a zone allowed
2372 * by the callers classzone_idx are added to balanced_pages. The total of
2373 * balanced pages must be at least 25% of the zones allowed by classzone_idx
2374 * for the node to be considered balanced. Forcing all zones to be balanced
2375 * for high orders can cause excessive reclaim when there are imbalanced zones.
2376 * The choice of 25% is due to
2377 * o a 16M DMA zone that is balanced will not balance a zone on any
2378 * reasonable sized machine
2379 * o On all other machines, the top zone must be at least a reasonable
2380 * percentage of the middle zones. For example, on 32-bit x86, highmem
2381 * would need to be at least 256M for it to be balance a whole node.
2382 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2383 * to balance a node on its own. These seemed like reasonable ratios.
2384 */
2385static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
2386                        int classzone_idx)
2387{
2388    unsigned long present_pages = 0;
2389    int i;
2390
2391    for (i = 0; i <= classzone_idx; i++)
2392        present_pages += pgdat->node_zones[i].present_pages;
2393
2394    /* A special case here: if zone has no page, we think it's balanced */
2395    return balanced_pages >= (present_pages >> 2);
2396}
2397
2398/*
2399 * Prepare kswapd for sleeping. This verifies that there are no processes
2400 * waiting in throttle_direct_reclaim() and that watermarks have been met.
2401 *
2402 * Returns true if kswapd is ready to sleep
2403 */
2404static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
2405                    int classzone_idx)
2406{
2407    int i;
2408    unsigned long balanced = 0;
2409    bool all_zones_ok = true;
2410
2411    /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2412    if (remaining)
2413        return false;
2414
2415    /*
2416     * There is a potential race between when kswapd checks its watermarks
2417     * and a process gets throttled. There is also a potential race if
2418     * processes get throttled, kswapd wakes, a large process exits therby
2419     * balancing the zones that causes kswapd to miss a wakeup. If kswapd
2420     * is going to sleep, no process should be sleeping on pfmemalloc_wait
2421     * so wake them now if necessary. If necessary, processes will wake
2422     * kswapd and get throttled again
2423     */
2424    if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
2425        wake_up(&pgdat->pfmemalloc_wait);
2426        return false;
2427    }
2428
2429    /* Check the watermark levels */
2430    for (i = 0; i <= classzone_idx; i++) {
2431        struct zone *zone = pgdat->node_zones + i;
2432
2433        if (!populated_zone(zone))
2434            continue;
2435
2436        /*
2437         * balance_pgdat() skips over all_unreclaimable after
2438         * DEF_PRIORITY. Effectively, it considers them balanced so
2439         * they must be considered balanced here as well if kswapd
2440         * is to sleep
2441         */
2442        if (zone->all_unreclaimable) {
2443            balanced += zone->present_pages;
2444            continue;
2445        }
2446
2447        if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
2448                            i, 0))
2449            all_zones_ok = false;
2450        else
2451            balanced += zone->present_pages;
2452    }
2453
2454    /*
2455     * For high-order requests, the balanced zones must contain at least
2456     * 25% of the nodes pages for kswapd to sleep. For order-0, all zones
2457     * must be balanced
2458     */
2459    if (order)
2460        return pgdat_balanced(pgdat, balanced, classzone_idx);
2461    else
2462        return all_zones_ok;
2463}
2464
2465/*
2466 * For kswapd, balance_pgdat() will work across all this node's zones until
2467 * they are all at high_wmark_pages(zone).
2468 *
2469 * Returns the final order kswapd was reclaiming at
2470 *
2471 * There is special handling here for zones which are full of pinned pages.
2472 * This can happen if the pages are all mlocked, or if they are all used by
2473 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2474 * What we do is to detect the case where all pages in the zone have been
2475 * scanned twice and there has been zero successful reclaim. Mark the zone as
2476 * dead and from now on, only perform a short scan. Basically we're polling
2477 * the zone for when the problem goes away.
2478 *
2479 * kswapd scans the zones in the highmem->normal->dma direction. It skips
2480 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2481 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2482 * lower zones regardless of the number of free pages in the lower zones. This
2483 * interoperates with the page allocator fallback scheme to ensure that aging
2484 * of pages is balanced across the zones.
2485 */
2486static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2487                            int *classzone_idx)
2488{
2489    int all_zones_ok;
2490    unsigned long balanced;
2491    int i;
2492    int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2493    unsigned long total_scanned;
2494    struct reclaim_state *reclaim_state = current->reclaim_state;
2495    unsigned long nr_soft_reclaimed;
2496    unsigned long nr_soft_scanned;
2497    struct scan_control sc = {
2498        .gfp_mask = GFP_KERNEL,
2499        .may_unmap = 1,
2500        .may_swap = 1,
2501        /*
2502         * kswapd doesn't want to be bailed out while reclaim. because
2503         * we want to put equal scanning pressure on each zone.
2504         */
2505        .nr_to_reclaim = ULONG_MAX,
2506        .order = order,
2507        .target_mem_cgroup = NULL,
2508    };
2509    struct shrink_control shrink = {
2510        .gfp_mask = sc.gfp_mask,
2511    };
2512loop_again:
2513    total_scanned = 0;
2514    sc.priority = DEF_PRIORITY;
2515    sc.nr_reclaimed = 0;
2516    sc.may_writepage = !laptop_mode;
2517    count_vm_event(PAGEOUTRUN);
2518
2519    do {
2520        unsigned long lru_pages = 0;
2521        int has_under_min_watermark_zone = 0;
2522
2523        all_zones_ok = 1;
2524        balanced = 0;
2525
2526        /*
2527         * Scan in the highmem->dma direction for the highest
2528         * zone which needs scanning
2529         */
2530        for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2531            struct zone *zone = pgdat->node_zones + i;
2532
2533            if (!populated_zone(zone))
2534                continue;
2535
2536            if (zone->all_unreclaimable &&
2537                sc.priority != DEF_PRIORITY)
2538                continue;
2539
2540            /*
2541             * Do some background aging of the anon list, to give
2542             * pages a chance to be referenced before reclaiming.
2543             */
2544            age_active_anon(zone, &sc);
2545
2546            /*
2547             * If the number of buffer_heads in the machine
2548             * exceeds the maximum allowed level and this node
2549             * has a highmem zone, force kswapd to reclaim from
2550             * it to relieve lowmem pressure.
2551             */
2552            if (buffer_heads_over_limit && is_highmem_idx(i)) {
2553                end_zone = i;
2554                break;
2555            }
2556
2557            if (!zone_watermark_ok_safe(zone, order,
2558                    high_wmark_pages(zone), 0, 0)) {
2559                end_zone = i;
2560                break;
2561            } else {
2562                /* If balanced, clear the congested flag */
2563                zone_clear_flag(zone, ZONE_CONGESTED);
2564            }
2565        }
2566        if (i < 0)
2567            goto out;
2568
2569        for (i = 0; i <= end_zone; i++) {
2570            struct zone *zone = pgdat->node_zones + i;
2571
2572            lru_pages += zone_reclaimable_pages(zone);
2573        }
2574
2575        /*
2576         * Now scan the zone in the dma->highmem direction, stopping
2577         * at the last zone which needs scanning.
2578         *
2579         * We do this because the page allocator works in the opposite
2580         * direction. This prevents the page allocator from allocating
2581         * pages behind kswapd's direction of progress, which would
2582         * cause too much scanning of the lower zones.
2583         */
2584        for (i = 0; i <= end_zone; i++) {
2585            struct zone *zone = pgdat->node_zones + i;
2586            int nr_slab, testorder;
2587            unsigned long balance_gap;
2588
2589            if (!populated_zone(zone))
2590                continue;
2591
2592            if (zone->all_unreclaimable &&
2593                sc.priority != DEF_PRIORITY)
2594                continue;
2595
2596            sc.nr_scanned = 0;
2597
2598            nr_soft_scanned = 0;
2599            /*
2600             * Call soft limit reclaim before calling shrink_zone.
2601             */
2602            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2603                            order, sc.gfp_mask,
2604                            &nr_soft_scanned);
2605            sc.nr_reclaimed += nr_soft_reclaimed;
2606            total_scanned += nr_soft_scanned;
2607
2608            /*
2609             * We put equal pressure on every zone, unless
2610             * one zone has way too many pages free
2611             * already. The "too many pages" is defined
2612             * as the high wmark plus a "gap" where the
2613             * gap is either the low watermark or 1%
2614             * of the zone, whichever is smaller.
2615             */
2616            balance_gap = min(low_wmark_pages(zone),
2617                (zone->present_pages +
2618                    KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2619                KSWAPD_ZONE_BALANCE_GAP_RATIO);
2620            /*
2621             * Kswapd reclaims only single pages with compaction
2622             * enabled. Trying too hard to reclaim until contiguous
2623             * free pages have become available can hurt performance
2624             * by evicting too much useful data from memory.
2625             * Do not reclaim more than needed for compaction.
2626             */
2627            testorder = order;
2628            if (COMPACTION_BUILD && order &&
2629                    compaction_suitable(zone, order) !=
2630                        COMPACT_SKIPPED)
2631                testorder = 0;
2632
2633            if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
2634                    !zone_watermark_ok_safe(zone, testorder,
2635                    high_wmark_pages(zone) + balance_gap,
2636                    end_zone, 0)) {
2637                shrink_zone(zone, &sc);
2638
2639                reclaim_state->reclaimed_slab = 0;
2640                nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2641                sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2642                total_scanned += sc.nr_scanned;
2643
2644                if (nr_slab == 0 && !zone_reclaimable(zone))
2645                    zone->all_unreclaimable = 1;
2646            }
2647
2648            /*
2649             * If we've done a decent amount of scanning and
2650             * the reclaim ratio is low, start doing writepage
2651             * even in laptop mode
2652             */
2653            if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2654                total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2655                sc.may_writepage = 1;
2656
2657            if (zone->all_unreclaimable) {
2658                if (end_zone && end_zone == i)
2659                    end_zone--;
2660                continue;
2661            }
2662
2663            if (!zone_watermark_ok_safe(zone, testorder,
2664                    high_wmark_pages(zone), end_zone, 0)) {
2665                all_zones_ok = 0;
2666                /*
2667                 * We are still under min water mark. This
2668                 * means that we have a GFP_ATOMIC allocation
2669                 * failure risk. Hurry up!
2670                 */
2671                if (!zone_watermark_ok_safe(zone, order,
2672                        min_wmark_pages(zone), end_zone, 0))
2673                    has_under_min_watermark_zone = 1;
2674            } else {
2675                /*
2676                 * If a zone reaches its high watermark,
2677                 * consider it to be no longer congested. It's
2678                 * possible there are dirty pages backed by
2679                 * congested BDIs but as pressure is relieved,
2680                 * speculatively avoid congestion waits
2681                 */
2682                zone_clear_flag(zone, ZONE_CONGESTED);
2683                if (i <= *classzone_idx)
2684                    balanced += zone->present_pages;
2685            }
2686
2687        }
2688
2689        /*
2690         * If the low watermark is met there is no need for processes
2691         * to be throttled on pfmemalloc_wait as they should not be
2692         * able to safely make forward progress. Wake them
2693         */
2694        if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
2695                pfmemalloc_watermark_ok(pgdat))
2696            wake_up(&pgdat->pfmemalloc_wait);
2697
2698        if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
2699            break; /* kswapd: all done */
2700        /*
2701         * OK, kswapd is getting into trouble. Take a nap, then take
2702         * another pass across the zones.
2703         */
2704        if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
2705            if (has_under_min_watermark_zone)
2706                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2707            else
2708                congestion_wait(BLK_RW_ASYNC, HZ/10);
2709        }
2710
2711        /*
2712         * We do this so kswapd doesn't build up large priorities for
2713         * example when it is freeing in parallel with allocators. It
2714         * matches the direct reclaim path behaviour in terms of impact
2715         * on zone->*_priority.
2716         */
2717        if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2718            break;
2719    } while (--sc.priority >= 0);
2720out:
2721
2722    /*
2723     * order-0: All zones must meet high watermark for a balanced node
2724     * high-order: Balanced zones must make up at least 25% of the node
2725     * for the node to be balanced
2726     */
2727    if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
2728        cond_resched();
2729
2730        try_to_freeze();
2731
2732        /*
2733         * Fragmentation may mean that the system cannot be
2734         * rebalanced for high-order allocations in all zones.
2735         * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2736         * it means the zones have been fully scanned and are still
2737         * not balanced. For high-order allocations, there is
2738         * little point trying all over again as kswapd may
2739         * infinite loop.
2740         *
2741         * Instead, recheck all watermarks at order-0 as they
2742         * are the most important. If watermarks are ok, kswapd will go
2743         * back to sleep. High-order users can still perform direct
2744         * reclaim if they wish.
2745         */
2746        if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2747            order = sc.order = 0;
2748
2749        goto loop_again;
2750    }
2751
2752    /*
2753     * If kswapd was reclaiming at a higher order, it has the option of
2754     * sleeping without all zones being balanced. Before it does, it must
2755     * ensure that the watermarks for order-0 on *all* zones are met and
2756     * that the congestion flags are cleared. The congestion flag must
2757     * be cleared as kswapd is the only mechanism that clears the flag
2758     * and it is potentially going to sleep here.
2759     */
2760    if (order) {
2761        int zones_need_compaction = 1;
2762
2763        for (i = 0; i <= end_zone; i++) {
2764            struct zone *zone = pgdat->node_zones + i;
2765
2766            if (!populated_zone(zone))
2767                continue;
2768
2769            if (zone->all_unreclaimable &&
2770                sc.priority != DEF_PRIORITY)
2771                continue;
2772
2773            /* Would compaction fail due to lack of free memory? */
2774            if (COMPACTION_BUILD &&
2775                compaction_suitable(zone, order) == COMPACT_SKIPPED)
2776                goto loop_again;
2777
2778            /* Confirm the zone is balanced for order-0 */
2779            if (!zone_watermark_ok(zone, 0,
2780                    high_wmark_pages(zone), 0, 0)) {
2781                order = sc.order = 0;
2782                goto loop_again;
2783            }
2784
2785            /* Check if the memory needs to be defragmented. */
2786            if (zone_watermark_ok(zone, order,
2787                    low_wmark_pages(zone), *classzone_idx, 0))
2788                zones_need_compaction = 0;
2789
2790            /* If balanced, clear the congested flag */
2791            zone_clear_flag(zone, ZONE_CONGESTED);
2792        }
2793
2794        if (zones_need_compaction)
2795            compact_pgdat(pgdat, order);
2796    }
2797
2798    /*
2799     * Return the order we were reclaiming at so prepare_kswapd_sleep()
2800     * makes a decision on the order we were last reclaiming at. However,
2801     * if another caller entered the allocator slow path while kswapd
2802     * was awake, order will remain at the higher level
2803     */
2804    *classzone_idx = end_zone;
2805    return order;
2806}
2807
2808static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
2809{
2810    long remaining = 0;
2811    DEFINE_WAIT(wait);
2812
2813    if (freezing(current) || kthread_should_stop())
2814        return;
2815
2816    prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2817
2818    /* Try to sleep for a short interval */
2819    if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
2820        remaining = schedule_timeout(HZ/10);
2821        finish_wait(&pgdat->kswapd_wait, &wait);
2822        prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2823    }
2824
2825    /*
2826     * After a short sleep, check if it was a premature sleep. If not, then
2827     * go fully to sleep until explicitly woken up.
2828     */
2829    if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
2830        trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2831
2832        /*
2833         * vmstat counters are not perfectly accurate and the estimated
2834         * value for counters such as NR_FREE_PAGES can deviate from the
2835         * true value by nr_online_cpus * threshold. To avoid the zone
2836         * watermarks being breached while under pressure, we reduce the
2837         * per-cpu vmstat threshold while kswapd is awake and restore
2838         * them before going back to sleep.
2839         */
2840        set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
2841
2842        if (!kthread_should_stop())
2843            schedule();
2844
2845        set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2846    } else {
2847        if (remaining)
2848            count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2849        else
2850            count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2851    }
2852    finish_wait(&pgdat->kswapd_wait, &wait);
2853}
2854
2855/*
2856 * The background pageout daemon, started as a kernel thread
2857 * from the init process.
2858 *
2859 * This basically trickles out pages so that we have _some_
2860 * free memory available even if there is no other activity
2861 * that frees anything up. This is needed for things like routing
2862 * etc, where we otherwise might have all activity going on in
2863 * asynchronous contexts that cannot page things out.
2864 *
2865 * If there are applications that are active memory-allocators
2866 * (most normal use), this basically shouldn't matter.
2867 */
2868static int kswapd(void *p)
2869{
2870    unsigned long order, new_order;
2871    unsigned balanced_order;
2872    int classzone_idx, new_classzone_idx;
2873    int balanced_classzone_idx;
2874    pg_data_t *pgdat = (pg_data_t*)p;
2875    struct task_struct *tsk = current;
2876
2877    struct reclaim_state reclaim_state = {
2878        .reclaimed_slab = 0,
2879    };
2880    const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2881
2882    lockdep_set_current_reclaim_state(GFP_KERNEL);
2883
2884    if (!cpumask_empty(cpumask))
2885        set_cpus_allowed_ptr(tsk, cpumask);
2886    current->reclaim_state = &reclaim_state;
2887
2888    /*
2889     * Tell the memory management that we're a "memory allocator",
2890     * and that if we need more memory we should get access to it
2891     * regardless (see "__alloc_pages()"). "kswapd" should
2892     * never get caught in the normal page freeing logic.
2893     *
2894     * (Kswapd normally doesn't need memory anyway, but sometimes
2895     * you need a small amount of memory in order to be able to
2896     * page out something else, and this flag essentially protects
2897     * us from recursively trying to free more memory as we're
2898     * trying to free the first piece of memory in the first place).
2899     */
2900    tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2901    set_freezable();
2902
2903    order = new_order = 0;
2904    balanced_order = 0;
2905    classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
2906    balanced_classzone_idx = classzone_idx;
2907    for ( ; ; ) {
2908        int ret;
2909
2910        /*
2911         * If the last balance_pgdat was unsuccessful it's unlikely a
2912         * new request of a similar or harder type will succeed soon
2913         * so consider going to sleep on the basis we reclaimed at
2914         */
2915        if (balanced_classzone_idx >= new_classzone_idx &&
2916                    balanced_order == new_order) {
2917            new_order = pgdat->kswapd_max_order;
2918            new_classzone_idx = pgdat->classzone_idx;
2919            pgdat->kswapd_max_order = 0;
2920            pgdat->classzone_idx = pgdat->nr_zones - 1;
2921        }
2922
2923        if (order < new_order || classzone_idx > new_classzone_idx) {
2924            /*
2925             * Don't sleep if someone wants a larger 'order'
2926             * allocation or has tigher zone constraints
2927             */
2928            order = new_order;
2929            classzone_idx = new_classzone_idx;
2930        } else {
2931            kswapd_try_to_sleep(pgdat, balanced_order,
2932                        balanced_classzone_idx);
2933            order = pgdat->kswapd_max_order;
2934            classzone_idx = pgdat->classzone_idx;
2935            new_order = order;
2936            new_classzone_idx = classzone_idx;
2937            pgdat->kswapd_max_order = 0;
2938            pgdat->classzone_idx = pgdat->nr_zones - 1;
2939        }
2940
2941        ret = try_to_freeze();
2942        if (kthread_should_stop())
2943            break;
2944
2945        /*
2946         * We can speed up thawing tasks if we don't call balance_pgdat
2947         * after returning from the refrigerator
2948         */
2949        if (!ret) {
2950            trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
2951            balanced_classzone_idx = classzone_idx;
2952            balanced_order = balance_pgdat(pgdat, order,
2953                        &balanced_classzone_idx);
2954        }
2955    }
2956    return 0;
2957}
2958
2959/*
2960 * A zone is low on free memory, so wake its kswapd task to service it.
2961 */
2962void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
2963{
2964    pg_data_t *pgdat;
2965
2966    if (!populated_zone(zone))
2967        return;
2968
2969    if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2970        return;
2971    pgdat = zone->zone_pgdat;
2972    if (pgdat->kswapd_max_order < order) {
2973        pgdat->kswapd_max_order = order;
2974        pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
2975    }
2976    if (!waitqueue_active(&pgdat->kswapd_wait))
2977        return;
2978    if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
2979        return;
2980
2981    trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
2982    wake_up_interruptible(&pgdat->kswapd_wait);
2983}
2984
2985/*
2986 * The reclaimable count would be mostly accurate.
2987 * The less reclaimable pages may be
2988 * - mlocked pages, which will be moved to unevictable list when encountered
2989 * - mapped pages, which may require several travels to be reclaimed
2990 * - dirty pages, which is not "instantly" reclaimable
2991 */
2992unsigned long global_reclaimable_pages(void)
2993{
2994    int nr;
2995
2996    nr = global_page_state(NR_ACTIVE_FILE) +
2997         global_page_state(NR_INACTIVE_FILE);
2998
2999    if (nr_swap_pages > 0)
3000        nr += global_page_state(NR_ACTIVE_ANON) +
3001              global_page_state(NR_INACTIVE_ANON);
3002
3003    return nr;
3004}
3005
3006unsigned long zone_reclaimable_pages(struct zone *zone)
3007{
3008    int nr;
3009
3010    nr = zone_page_state(zone, NR_ACTIVE_FILE) +
3011         zone_page_state(zone, NR_INACTIVE_FILE);
3012
3013    if (nr_swap_pages > 0)
3014        nr += zone_page_state(zone, NR_ACTIVE_ANON) +
3015              zone_page_state(zone, NR_INACTIVE_ANON);
3016
3017    return nr;
3018}
3019
3020#ifdef CONFIG_HIBERNATION
3021/*
3022 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3023 * freed pages.
3024 *
3025 * Rather than trying to age LRUs the aim is to preserve the overall
3026 * LRU order by reclaiming preferentially
3027 * inactive > active > active referenced > active mapped
3028 */
3029unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3030{
3031    struct reclaim_state reclaim_state;
3032    struct scan_control sc = {
3033        .gfp_mask = GFP_HIGHUSER_MOVABLE,
3034        .may_swap = 1,
3035        .may_unmap = 1,
3036        .may_writepage = 1,
3037        .nr_to_reclaim = nr_to_reclaim,
3038        .hibernation_mode = 1,
3039        .order = 0,
3040        .priority = DEF_PRIORITY,
3041    };
3042    struct shrink_control shrink = {
3043        .gfp_mask = sc.gfp_mask,
3044    };
3045    struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3046    struct task_struct *p = current;
3047    unsigned long nr_reclaimed;
3048
3049    p->flags |= PF_MEMALLOC;
3050    lockdep_set_current_reclaim_state(sc.gfp_mask);
3051    reclaim_state.reclaimed_slab = 0;
3052    p->reclaim_state = &reclaim_state;
3053
3054    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
3055
3056    p->reclaim_state = NULL;
3057    lockdep_clear_current_reclaim_state();
3058    p->flags &= ~PF_MEMALLOC;
3059
3060    return nr_reclaimed;
3061}
3062#endif /* CONFIG_HIBERNATION */
3063
3064/* It's optimal to keep kswapds on the same CPUs as their memory, but
3065   not required for correctness. So if the last cpu in a node goes
3066   away, we get changed to run anywhere: as the first one comes back,
3067   restore their cpu bindings. */
3068static int __devinit cpu_callback(struct notifier_block *nfb,
3069                  unsigned long action, void *hcpu)
3070{
3071    int nid;
3072
3073    if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
3074        for_each_node_state(nid, N_HIGH_MEMORY) {
3075            pg_data_t *pgdat = NODE_DATA(nid);
3076            const struct cpumask *mask;
3077
3078            mask = cpumask_of_node(pgdat->node_id);
3079
3080            if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3081                /* One of our CPUs online: restore mask */
3082                set_cpus_allowed_ptr(pgdat->kswapd, mask);
3083        }
3084    }
3085    return NOTIFY_OK;
3086}
3087
3088/*
3089 * This kswapd start function will be called by init and node-hot-add.
3090 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
3091 */
3092int kswapd_run(int nid)
3093{
3094    pg_data_t *pgdat = NODE_DATA(nid);
3095    int ret = 0;
3096
3097    if (pgdat->kswapd)
3098        return 0;
3099
3100    pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3101    if (IS_ERR(pgdat->kswapd)) {
3102        /* failure at boot is fatal */
3103        BUG_ON(system_state == SYSTEM_BOOTING);
3104        printk("Failed to start kswapd on node %d\n",nid);
3105        ret = -1;
3106    }
3107    return ret;
3108}
3109
3110/*
3111 * Called by memory hotplug when all memory in a node is offlined. Caller must
3112 * hold lock_memory_hotplug().
3113 */
3114void kswapd_stop(int nid)
3115{
3116    struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3117
3118    if (kswapd) {
3119        kthread_stop(kswapd);
3120        NODE_DATA(nid)->kswapd = NULL;
3121    }
3122}
3123
3124static int __init kswapd_init(void)
3125{
3126    int nid;
3127
3128    swap_setup();
3129    for_each_node_state(nid, N_HIGH_MEMORY)
3130         kswapd_run(nid);
3131    hotcpu_notifier(cpu_callback, 0);
3132    return 0;
3133}
3134
3135module_init(kswapd_init)
3136
3137#ifdef CONFIG_NUMA
3138/*
3139 * Zone reclaim mode
3140 *
3141 * If non-zero call zone_reclaim when the number of free pages falls below
3142 * the watermarks.
3143 */
3144int zone_reclaim_mode __read_mostly;
3145
3146#define RECLAIM_OFF 0
3147#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
3148#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3149#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
3150
3151/*
3152 * Priority for ZONE_RECLAIM. This determines the fraction of pages
3153 * of a node considered for each zone_reclaim. 4 scans 1/16th of
3154 * a zone.
3155 */
3156#define ZONE_RECLAIM_PRIORITY 4
3157
3158/*
3159 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3160 * occur.
3161 */
3162int sysctl_min_unmapped_ratio = 1;
3163
3164/*
3165 * If the number of slab pages in a zone grows beyond this percentage then
3166 * slab reclaim needs to occur.
3167 */
3168int sysctl_min_slab_ratio = 5;
3169
3170static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3171{
3172    unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3173    unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3174        zone_page_state(zone, NR_ACTIVE_FILE);
3175
3176    /*
3177     * It's possible for there to be more file mapped pages than
3178     * accounted for by the pages on the file LRU lists because
3179     * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3180     */
3181    return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3182}
3183
3184/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3185static long zone_pagecache_reclaimable(struct zone *zone)
3186{
3187    long nr_pagecache_reclaimable;
3188    long delta = 0;
3189
3190    /*
3191     * If RECLAIM_SWAP is set, then all file pages are considered
3192     * potentially reclaimable. Otherwise, we have to worry about
3193     * pages like swapcache and zone_unmapped_file_pages() provides
3194     * a better estimate
3195     */
3196    if (zone_reclaim_mode & RECLAIM_SWAP)
3197        nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3198    else
3199        nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3200
3201    /* If we can't clean pages, remove dirty pages from consideration */
3202    if (!(zone_reclaim_mode & RECLAIM_WRITE))
3203        delta += zone_page_state(zone, NR_FILE_DIRTY);
3204
3205    /* Watch for any possible underflows due to delta */
3206    if (unlikely(delta > nr_pagecache_reclaimable))
3207        delta = nr_pagecache_reclaimable;
3208
3209    return nr_pagecache_reclaimable - delta;
3210}
3211
3212/*
3213 * Try to free up some pages from this zone through reclaim.
3214 */
3215static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3216{
3217    /* Minimum pages needed in order to stay on node */
3218    const unsigned long nr_pages = 1 << order;
3219    struct task_struct *p = current;
3220    struct reclaim_state reclaim_state;
3221    struct scan_control sc = {
3222        .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3223        .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
3224        .may_swap = 1,
3225        .nr_to_reclaim = max_t(unsigned long, nr_pages,
3226                       SWAP_CLUSTER_MAX),
3227        .gfp_mask = gfp_mask,
3228        .order = order,
3229        .priority = ZONE_RECLAIM_PRIORITY,
3230    };
3231    struct shrink_control shrink = {
3232        .gfp_mask = sc.gfp_mask,
3233    };
3234    unsigned long nr_slab_pages0, nr_slab_pages1;
3235
3236    cond_resched();
3237    /*
3238     * We need to be able to allocate from the reserves for RECLAIM_SWAP
3239     * and we also need to be able to write out pages for RECLAIM_WRITE
3240     * and RECLAIM_SWAP.
3241     */
3242    p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3243    lockdep_set_current_reclaim_state(gfp_mask);
3244    reclaim_state.reclaimed_slab = 0;
3245    p->reclaim_state = &reclaim_state;
3246
3247    if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3248        /*
3249         * Free memory by calling shrink zone with increasing
3250         * priorities until we have enough memory freed.
3251         */
3252        do {
3253            shrink_zone(zone, &sc);
3254        } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3255    }
3256
3257    nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3258    if (nr_slab_pages0 > zone->min_slab_pages) {
3259        /*
3260         * shrink_slab() does not currently allow us to determine how
3261         * many pages were freed in this zone. So we take the current
3262         * number of slab pages and shake the slab until it is reduced
3263         * by the same nr_pages that we used for reclaiming unmapped
3264         * pages.
3265         *
3266         * Note that shrink_slab will free memory on all zones and may
3267         * take a long time.
3268         */
3269        for (;;) {
3270            unsigned long lru_pages = zone_reclaimable_pages(zone);
3271
3272            /* No reclaimable slab or very low memory pressure */
3273            if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3274                break;
3275
3276            /* Freed enough memory */
3277            nr_slab_pages1 = zone_page_state(zone,
3278                            NR_SLAB_RECLAIMABLE);
3279            if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3280                break;
3281        }
3282
3283        /*
3284         * Update nr_reclaimed by the number of slab pages we
3285         * reclaimed from this zone.
3286         */
3287        nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3288        if (nr_slab_pages1 < nr_slab_pages0)
3289            sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
3290    }
3291
3292    p->reclaim_state = NULL;
3293    current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3294    lockdep_clear_current_reclaim_state();
3295    return sc.nr_reclaimed >= nr_pages;
3296}
3297
3298int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3299{
3300    int node_id;
3301    int ret;
3302
3303    /*
3304     * Zone reclaim reclaims unmapped file backed pages and
3305     * slab pages if we are over the defined limits.
3306     *
3307     * A small portion of unmapped file backed pages is needed for
3308     * file I/O otherwise pages read by file I/O will be immediately
3309     * thrown out if the zone is overallocated. So we do not reclaim
3310     * if less than a specified percentage of the zone is used by
3311     * unmapped file backed pages.
3312     */
3313    if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3314        zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3315        return ZONE_RECLAIM_FULL;
3316
3317    if (zone->all_unreclaimable)
3318        return ZONE_RECLAIM_FULL;
3319
3320    /*
3321     * Do not scan if the allocation should not be delayed.
3322     */
3323    if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3324        return ZONE_RECLAIM_NOSCAN;
3325
3326    /*
3327     * Only run zone reclaim on the local zone or on zones that do not
3328     * have associated processors. This will favor the local processor
3329     * over remote processors and spread off node memory allocations
3330     * as wide as possible.
3331     */
3332    node_id = zone_to_nid(zone);
3333    if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3334        return ZONE_RECLAIM_NOSCAN;
3335
3336    if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3337        return ZONE_RECLAIM_NOSCAN;
3338
3339    ret = __zone_reclaim(zone, gfp_mask, order);
3340    zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3341
3342    if (!ret)
3343        count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3344
3345    return ret;
3346}
3347#endif
3348
3349/*
3350 * page_evictable - test whether a page is evictable
3351 * @page: the page to test
3352 * @vma: the VMA in which the page is or will be mapped, may be NULL
3353 *
3354 * Test whether page is evictable--i.e., should be placed on active/inactive
3355 * lists vs unevictable list. The vma argument is !NULL when called from the
3356 * fault path to determine how to instantate a new page.
3357 *
3358 * Reasons page might not be evictable:
3359 * (1) page's mapping marked unevictable
3360 * (2) page is part of an mlocked VMA
3361 *
3362 */
3363int page_evictable(struct page *page, struct vm_area_struct *vma)
3364{
3365
3366    if (mapping_unevictable(page_mapping(page)))
3367        return 0;
3368
3369    if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
3370        return 0;
3371
3372    return 1;
3373}
3374
3375#ifdef CONFIG_SHMEM
3376/**
3377 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3378 * @pages: array of pages to check
3379 * @nr_pages: number of pages to check
3380 *
3381 * Checks pages for evictability and moves them to the appropriate lru list.
3382 *
3383 * This function is only used for SysV IPC SHM_UNLOCK.
3384 */
3385void check_move_unevictable_pages(struct page **pages, int nr_pages)
3386{
3387    struct lruvec *lruvec;
3388    struct zone *zone = NULL;
3389    int pgscanned = 0;
3390    int pgrescued = 0;
3391    int i;
3392
3393    for (i = 0; i < nr_pages; i++) {
3394        struct page *page = pages[i];
3395        struct zone *pagezone;
3396
3397        pgscanned++;
3398        pagezone = page_zone(page);
3399        if (pagezone != zone) {
3400            if (zone)
3401                spin_unlock_irq(&zone->lru_lock);
3402            zone = pagezone;
3403            spin_lock_irq(&zone->lru_lock);
3404        }
3405        lruvec = mem_cgroup_page_lruvec(page, zone);
3406
3407        if (!PageLRU(page) || !PageUnevictable(page))
3408            continue;
3409
3410        if (page_evictable(page, NULL)) {
3411            enum lru_list lru = page_lru_base_type(page);
3412
3413            VM_BUG_ON(PageActive(page));
3414            ClearPageUnevictable(page);
3415            del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3416            add_page_to_lru_list(page, lruvec, lru);
3417            pgrescued++;
3418        }
3419    }
3420
3421    if (zone) {
3422        __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3423        __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3424        spin_unlock_irq(&zone->lru_lock);
3425    }
3426}
3427#endif /* CONFIG_SHMEM */
3428
3429static void warn_scan_unevictable_pages(void)
3430{
3431    printk_once(KERN_WARNING
3432            "%s: The scan_unevictable_pages sysctl/node-interface has been "
3433            "disabled for lack of a legitimate use case. If you have "
3434            "one, please send an email to linux-mm@kvack.org.\n",
3435            current->comm);
3436}
3437
3438/*
3439 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3440 * all nodes' unevictable lists for evictable pages
3441 */
3442unsigned long scan_unevictable_pages;
3443
3444int scan_unevictable_handler(struct ctl_table *table, int write,
3445               void __user *buffer,
3446               size_t *length, loff_t *ppos)
3447{
3448    warn_scan_unevictable_pages();
3449    proc_doulongvec_minmax(table, write, buffer, length, ppos);
3450    scan_unevictable_pages = 0;
3451    return 0;
3452}
3453
3454#ifdef CONFIG_NUMA
3455/*
3456 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3457 * a specified node's per zone unevictable lists for evictable pages.
3458 */
3459
3460static ssize_t read_scan_unevictable_node(struct device *dev,
3461                      struct device_attribute *attr,
3462                      char *buf)
3463{
3464    warn_scan_unevictable_pages();
3465    return sprintf(buf, "0\n"); /* always zero; should fit... */
3466}
3467
3468static ssize_t write_scan_unevictable_node(struct device *dev,
3469                       struct device_attribute *attr,
3470                    const char *buf, size_t count)
3471{
3472    warn_scan_unevictable_pages();
3473    return 1;
3474}
3475
3476
3477static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3478            read_scan_unevictable_node,
3479            write_scan_unevictable_node);
3480
3481int scan_unevictable_register_node(struct node *node)
3482{
3483    return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
3484}
3485
3486void scan_unevictable_unregister_node(struct node *node)
3487{
3488    device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
3489}
3490#endif
3491

Archive Download this file



interactive