Root/mm/vmscan.c

1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmpressure.h>
23#include <linux/vmstat.h>
24#include <linux/file.h>
25#include <linux/writeback.h>
26#include <linux/blkdev.h>
27#include <linux/buffer_head.h> /* for try_to_release_page(),
28                    buffer_heads_over_limit */
29#include <linux/mm_inline.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/compaction.h>
36#include <linux/notifier.h>
37#include <linux/rwsem.h>
38#include <linux/delay.h>
39#include <linux/kthread.h>
40#include <linux/freezer.h>
41#include <linux/memcontrol.h>
42#include <linux/delayacct.h>
43#include <linux/sysctl.h>
44#include <linux/oom.h>
45#include <linux/prefetch.h>
46
47#include <asm/tlbflush.h>
48#include <asm/div64.h>
49
50#include <linux/swapops.h>
51#include <linux/balloon_compaction.h>
52
53#include "internal.h"
54
55#define CREATE_TRACE_POINTS
56#include <trace/events/vmscan.h>
57
58struct scan_control {
59    /* Incremented by the number of inactive pages that were scanned */
60    unsigned long nr_scanned;
61
62    /* Number of pages freed so far during a call to shrink_zones() */
63    unsigned long nr_reclaimed;
64
65    /* How many pages shrink_list() should reclaim */
66    unsigned long nr_to_reclaim;
67
68    unsigned long hibernation_mode;
69
70    /* This context's GFP mask */
71    gfp_t gfp_mask;
72
73    int may_writepage;
74
75    /* Can mapped pages be reclaimed? */
76    int may_unmap;
77
78    /* Can pages be swapped as part of reclaim? */
79    int may_swap;
80
81    int order;
82
83    /* Scan (total_size >> priority) pages at once */
84    int priority;
85
86    /*
87     * The memory cgroup that hit its limit and as a result is the
88     * primary target of this reclaim invocation.
89     */
90    struct mem_cgroup *target_mem_cgroup;
91
92    /*
93     * Nodemask of nodes allowed by the caller. If NULL, all nodes
94     * are scanned.
95     */
96    nodemask_t *nodemask;
97};
98
99#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
100
101#ifdef ARCH_HAS_PREFETCH
102#define prefetch_prev_lru_page(_page, _base, _field) \
103    do { \
104        if ((_page)->lru.prev != _base) { \
105            struct page *prev; \
106                                    \
107            prev = lru_to_page(&(_page->lru)); \
108            prefetch(&prev->_field); \
109        } \
110    } while (0)
111#else
112#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
113#endif
114
115#ifdef ARCH_HAS_PREFETCHW
116#define prefetchw_prev_lru_page(_page, _base, _field) \
117    do { \
118        if ((_page)->lru.prev != _base) { \
119            struct page *prev; \
120                                    \
121            prev = lru_to_page(&(_page->lru)); \
122            prefetchw(&prev->_field); \
123        } \
124    } while (0)
125#else
126#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
127#endif
128
129/*
130 * From 0 .. 100. Higher means more swappy.
131 */
132int vm_swappiness = 60;
133unsigned long vm_total_pages; /* The total number of pages which the VM controls */
134
135static LIST_HEAD(shrinker_list);
136static DECLARE_RWSEM(shrinker_rwsem);
137
138#ifdef CONFIG_MEMCG
139static bool global_reclaim(struct scan_control *sc)
140{
141    return !sc->target_mem_cgroup;
142}
143#else
144static bool global_reclaim(struct scan_control *sc)
145{
146    return true;
147}
148#endif
149
150unsigned long zone_reclaimable_pages(struct zone *zone)
151{
152    int nr;
153
154    nr = zone_page_state(zone, NR_ACTIVE_FILE) +
155         zone_page_state(zone, NR_INACTIVE_FILE);
156
157    if (get_nr_swap_pages() > 0)
158        nr += zone_page_state(zone, NR_ACTIVE_ANON) +
159              zone_page_state(zone, NR_INACTIVE_ANON);
160
161    return nr;
162}
163
164bool zone_reclaimable(struct zone *zone)
165{
166    return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
167}
168
169static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
170{
171    if (!mem_cgroup_disabled())
172        return mem_cgroup_get_lru_size(lruvec, lru);
173
174    return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
175}
176
177/*
178 * Add a shrinker callback to be called from the vm.
179 */
180int register_shrinker(struct shrinker *shrinker)
181{
182    size_t size = sizeof(*shrinker->nr_deferred);
183
184    /*
185     * If we only have one possible node in the system anyway, save
186     * ourselves the trouble and disable NUMA aware behavior. This way we
187     * will save memory and some small loop time later.
188     */
189    if (nr_node_ids == 1)
190        shrinker->flags &= ~SHRINKER_NUMA_AWARE;
191
192    if (shrinker->flags & SHRINKER_NUMA_AWARE)
193        size *= nr_node_ids;
194
195    shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
196    if (!shrinker->nr_deferred)
197        return -ENOMEM;
198
199    down_write(&shrinker_rwsem);
200    list_add_tail(&shrinker->list, &shrinker_list);
201    up_write(&shrinker_rwsem);
202    return 0;
203}
204EXPORT_SYMBOL(register_shrinker);
205
206/*
207 * Remove one
208 */
209void unregister_shrinker(struct shrinker *shrinker)
210{
211    down_write(&shrinker_rwsem);
212    list_del(&shrinker->list);
213    up_write(&shrinker_rwsem);
214    kfree(shrinker->nr_deferred);
215}
216EXPORT_SYMBOL(unregister_shrinker);
217
218#define SHRINK_BATCH 128
219
220static unsigned long
221shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
222         unsigned long nr_pages_scanned, unsigned long lru_pages)
223{
224    unsigned long freed = 0;
225    unsigned long long delta;
226    long total_scan;
227    long max_pass;
228    long nr;
229    long new_nr;
230    int nid = shrinkctl->nid;
231    long batch_size = shrinker->batch ? shrinker->batch
232                      : SHRINK_BATCH;
233
234    max_pass = shrinker->count_objects(shrinker, shrinkctl);
235    if (max_pass == 0)
236        return 0;
237
238    /*
239     * copy the current shrinker scan count into a local variable
240     * and zero it so that other concurrent shrinker invocations
241     * don't also do this scanning work.
242     */
243    nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
244
245    total_scan = nr;
246    delta = (4 * nr_pages_scanned) / shrinker->seeks;
247    delta *= max_pass;
248    do_div(delta, lru_pages + 1);
249    total_scan += delta;
250    if (total_scan < 0) {
251        printk(KERN_ERR
252        "shrink_slab: %pF negative objects to delete nr=%ld\n",
253               shrinker->scan_objects, total_scan);
254        total_scan = max_pass;
255    }
256
257    /*
258     * We need to avoid excessive windup on filesystem shrinkers
259     * due to large numbers of GFP_NOFS allocations causing the
260     * shrinkers to return -1 all the time. This results in a large
261     * nr being built up so when a shrink that can do some work
262     * comes along it empties the entire cache due to nr >>>
263     * max_pass. This is bad for sustaining a working set in
264     * memory.
265     *
266     * Hence only allow the shrinker to scan the entire cache when
267     * a large delta change is calculated directly.
268     */
269    if (delta < max_pass / 4)
270        total_scan = min(total_scan, max_pass / 2);
271
272    /*
273     * Avoid risking looping forever due to too large nr value:
274     * never try to free more than twice the estimate number of
275     * freeable entries.
276     */
277    if (total_scan > max_pass * 2)
278        total_scan = max_pass * 2;
279
280    trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
281                nr_pages_scanned, lru_pages,
282                max_pass, delta, total_scan);
283
284    while (total_scan >= batch_size) {
285        unsigned long ret;
286
287        shrinkctl->nr_to_scan = batch_size;
288        ret = shrinker->scan_objects(shrinker, shrinkctl);
289        if (ret == SHRINK_STOP)
290            break;
291        freed += ret;
292
293        count_vm_events(SLABS_SCANNED, batch_size);
294        total_scan -= batch_size;
295
296        cond_resched();
297    }
298
299    /*
300     * move the unused scan count back into the shrinker in a
301     * manner that handles concurrent updates. If we exhausted the
302     * scan, there is no need to do an update.
303     */
304    if (total_scan > 0)
305        new_nr = atomic_long_add_return(total_scan,
306                        &shrinker->nr_deferred[nid]);
307    else
308        new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
309
310    trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
311    return freed;
312}
313
314/*
315 * Call the shrink functions to age shrinkable caches
316 *
317 * Here we assume it costs one seek to replace a lru page and that it also
318 * takes a seek to recreate a cache object. With this in mind we age equal
319 * percentages of the lru and ageable caches. This should balance the seeks
320 * generated by these structures.
321 *
322 * If the vm encountered mapped pages on the LRU it increase the pressure on
323 * slab to avoid swapping.
324 *
325 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
326 *
327 * `lru_pages' represents the number of on-LRU pages in all the zones which
328 * are eligible for the caller's allocation attempt. It is used for balancing
329 * slab reclaim versus page reclaim.
330 *
331 * Returns the number of slab objects which we shrunk.
332 */
333unsigned long shrink_slab(struct shrink_control *shrinkctl,
334              unsigned long nr_pages_scanned,
335              unsigned long lru_pages)
336{
337    struct shrinker *shrinker;
338    unsigned long freed = 0;
339
340    if (nr_pages_scanned == 0)
341        nr_pages_scanned = SWAP_CLUSTER_MAX;
342
343    if (!down_read_trylock(&shrinker_rwsem)) {
344        /*
345         * If we would return 0, our callers would understand that we
346         * have nothing else to shrink and give up trying. By returning
347         * 1 we keep it going and assume we'll be able to shrink next
348         * time.
349         */
350        freed = 1;
351        goto out;
352    }
353
354    list_for_each_entry(shrinker, &shrinker_list, list) {
355        for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
356            if (!node_online(shrinkctl->nid))
357                continue;
358
359            if (!(shrinker->flags & SHRINKER_NUMA_AWARE) &&
360                (shrinkctl->nid != 0))
361                break;
362
363            freed += shrink_slab_node(shrinkctl, shrinker,
364                 nr_pages_scanned, lru_pages);
365
366        }
367    }
368    up_read(&shrinker_rwsem);
369out:
370    cond_resched();
371    return freed;
372}
373
374static inline int is_page_cache_freeable(struct page *page)
375{
376    /*
377     * A freeable page cache page is referenced only by the caller
378     * that isolated the page, the page cache radix tree and
379     * optional buffer heads at page->private.
380     */
381    return page_count(page) - page_has_private(page) == 2;
382}
383
384static int may_write_to_queue(struct backing_dev_info *bdi,
385                  struct scan_control *sc)
386{
387    if (current->flags & PF_SWAPWRITE)
388        return 1;
389    if (!bdi_write_congested(bdi))
390        return 1;
391    if (bdi == current->backing_dev_info)
392        return 1;
393    return 0;
394}
395
396/*
397 * We detected a synchronous write error writing a page out. Probably
398 * -ENOSPC. We need to propagate that into the address_space for a subsequent
399 * fsync(), msync() or close().
400 *
401 * The tricky part is that after writepage we cannot touch the mapping: nothing
402 * prevents it from being freed up. But we have a ref on the page and once
403 * that page is locked, the mapping is pinned.
404 *
405 * We're allowed to run sleeping lock_page() here because we know the caller has
406 * __GFP_FS.
407 */
408static void handle_write_error(struct address_space *mapping,
409                struct page *page, int error)
410{
411    lock_page(page);
412    if (page_mapping(page) == mapping)
413        mapping_set_error(mapping, error);
414    unlock_page(page);
415}
416
417/* possible outcome of pageout() */
418typedef enum {
419    /* failed to write page out, page is locked */
420    PAGE_KEEP,
421    /* move page to the active list, page is locked */
422    PAGE_ACTIVATE,
423    /* page has been sent to the disk successfully, page is unlocked */
424    PAGE_SUCCESS,
425    /* page is clean and locked */
426    PAGE_CLEAN,
427} pageout_t;
428
429/*
430 * pageout is called by shrink_page_list() for each dirty page.
431 * Calls ->writepage().
432 */
433static pageout_t pageout(struct page *page, struct address_space *mapping,
434             struct scan_control *sc)
435{
436    /*
437     * If the page is dirty, only perform writeback if that write
438     * will be non-blocking. To prevent this allocation from being
439     * stalled by pagecache activity. But note that there may be
440     * stalls if we need to run get_block(). We could test
441     * PagePrivate for that.
442     *
443     * If this process is currently in __generic_file_aio_write() against
444     * this page's queue, we can perform writeback even if that
445     * will block.
446     *
447     * If the page is swapcache, write it back even if that would
448     * block, for some throttling. This happens by accident, because
449     * swap_backing_dev_info is bust: it doesn't reflect the
450     * congestion state of the swapdevs. Easy to fix, if needed.
451     */
452    if (!is_page_cache_freeable(page))
453        return PAGE_KEEP;
454    if (!mapping) {
455        /*
456         * Some data journaling orphaned pages can have
457         * page->mapping == NULL while being dirty with clean buffers.
458         */
459        if (page_has_private(page)) {
460            if (try_to_free_buffers(page)) {
461                ClearPageDirty(page);
462                printk("%s: orphaned page\n", __func__);
463                return PAGE_CLEAN;
464            }
465        }
466        return PAGE_KEEP;
467    }
468    if (mapping->a_ops->writepage == NULL)
469        return PAGE_ACTIVATE;
470    if (!may_write_to_queue(mapping->backing_dev_info, sc))
471        return PAGE_KEEP;
472
473    if (clear_page_dirty_for_io(page)) {
474        int res;
475        struct writeback_control wbc = {
476            .sync_mode = WB_SYNC_NONE,
477            .nr_to_write = SWAP_CLUSTER_MAX,
478            .range_start = 0,
479            .range_end = LLONG_MAX,
480            .for_reclaim = 1,
481        };
482
483        SetPageReclaim(page);
484        res = mapping->a_ops->writepage(page, &wbc);
485        if (res < 0)
486            handle_write_error(mapping, page, res);
487        if (res == AOP_WRITEPAGE_ACTIVATE) {
488            ClearPageReclaim(page);
489            return PAGE_ACTIVATE;
490        }
491
492        if (!PageWriteback(page)) {
493            /* synchronous write or broken a_ops? */
494            ClearPageReclaim(page);
495        }
496        trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
497        inc_zone_page_state(page, NR_VMSCAN_WRITE);
498        return PAGE_SUCCESS;
499    }
500
501    return PAGE_CLEAN;
502}
503
504/*
505 * Same as remove_mapping, but if the page is removed from the mapping, it
506 * gets returned with a refcount of 0.
507 */
508static int __remove_mapping(struct address_space *mapping, struct page *page)
509{
510    BUG_ON(!PageLocked(page));
511    BUG_ON(mapping != page_mapping(page));
512
513    spin_lock_irq(&mapping->tree_lock);
514    /*
515     * The non racy check for a busy page.
516     *
517     * Must be careful with the order of the tests. When someone has
518     * a ref to the page, it may be possible that they dirty it then
519     * drop the reference. So if PageDirty is tested before page_count
520     * here, then the following race may occur:
521     *
522     * get_user_pages(&page);
523     * [user mapping goes away]
524     * write_to(page);
525     * !PageDirty(page) [good]
526     * SetPageDirty(page);
527     * put_page(page);
528     * !page_count(page) [good, discard it]
529     *
530     * [oops, our write_to data is lost]
531     *
532     * Reversing the order of the tests ensures such a situation cannot
533     * escape unnoticed. The smp_rmb is needed to ensure the page->flags
534     * load is not satisfied before that of page->_count.
535     *
536     * Note that if SetPageDirty is always performed via set_page_dirty,
537     * and thus under tree_lock, then this ordering is not required.
538     */
539    if (!page_freeze_refs(page, 2))
540        goto cannot_free;
541    /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
542    if (unlikely(PageDirty(page))) {
543        page_unfreeze_refs(page, 2);
544        goto cannot_free;
545    }
546
547    if (PageSwapCache(page)) {
548        swp_entry_t swap = { .val = page_private(page) };
549        __delete_from_swap_cache(page);
550        spin_unlock_irq(&mapping->tree_lock);
551        swapcache_free(swap, page);
552    } else {
553        void (*freepage)(struct page *);
554
555        freepage = mapping->a_ops->freepage;
556
557        __delete_from_page_cache(page);
558        spin_unlock_irq(&mapping->tree_lock);
559        mem_cgroup_uncharge_cache_page(page);
560
561        if (freepage != NULL)
562            freepage(page);
563    }
564
565    return 1;
566
567cannot_free:
568    spin_unlock_irq(&mapping->tree_lock);
569    return 0;
570}
571
572/*
573 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
574 * someone else has a ref on the page, abort and return 0. If it was
575 * successfully detached, return 1. Assumes the caller has a single ref on
576 * this page.
577 */
578int remove_mapping(struct address_space *mapping, struct page *page)
579{
580    if (__remove_mapping(mapping, page)) {
581        /*
582         * Unfreezing the refcount with 1 rather than 2 effectively
583         * drops the pagecache ref for us without requiring another
584         * atomic operation.
585         */
586        page_unfreeze_refs(page, 1);
587        return 1;
588    }
589    return 0;
590}
591
592/**
593 * putback_lru_page - put previously isolated page onto appropriate LRU list
594 * @page: page to be put back to appropriate lru list
595 *
596 * Add previously isolated @page to appropriate LRU list.
597 * Page may still be unevictable for other reasons.
598 *
599 * lru_lock must not be held, interrupts must be enabled.
600 */
601void putback_lru_page(struct page *page)
602{
603    bool is_unevictable;
604    int was_unevictable = PageUnevictable(page);
605
606    VM_BUG_ON(PageLRU(page));
607
608redo:
609    ClearPageUnevictable(page);
610
611    if (page_evictable(page)) {
612        /*
613         * For evictable pages, we can use the cache.
614         * In event of a race, worst case is we end up with an
615         * unevictable page on [in]active list.
616         * We know how to handle that.
617         */
618        is_unevictable = false;
619        lru_cache_add(page);
620    } else {
621        /*
622         * Put unevictable pages directly on zone's unevictable
623         * list.
624         */
625        is_unevictable = true;
626        add_page_to_unevictable_list(page);
627        /*
628         * When racing with an mlock or AS_UNEVICTABLE clearing
629         * (page is unlocked) make sure that if the other thread
630         * does not observe our setting of PG_lru and fails
631         * isolation/check_move_unevictable_pages,
632         * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
633         * the page back to the evictable list.
634         *
635         * The other side is TestClearPageMlocked() or shmem_lock().
636         */
637        smp_mb();
638    }
639
640    /*
641     * page's status can change while we move it among lru. If an evictable
642     * page is on unevictable list, it never be freed. To avoid that,
643     * check after we added it to the list, again.
644     */
645    if (is_unevictable && page_evictable(page)) {
646        if (!isolate_lru_page(page)) {
647            put_page(page);
648            goto redo;
649        }
650        /* This means someone else dropped this page from LRU
651         * So, it will be freed or putback to LRU again. There is
652         * nothing to do here.
653         */
654    }
655
656    if (was_unevictable && !is_unevictable)
657        count_vm_event(UNEVICTABLE_PGRESCUED);
658    else if (!was_unevictable && is_unevictable)
659        count_vm_event(UNEVICTABLE_PGCULLED);
660
661    put_page(page); /* drop ref from isolate */
662}
663
664enum page_references {
665    PAGEREF_RECLAIM,
666    PAGEREF_RECLAIM_CLEAN,
667    PAGEREF_KEEP,
668    PAGEREF_ACTIVATE,
669};
670
671static enum page_references page_check_references(struct page *page,
672                          struct scan_control *sc)
673{
674    int referenced_ptes, referenced_page;
675    unsigned long vm_flags;
676
677    referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
678                      &vm_flags);
679    referenced_page = TestClearPageReferenced(page);
680
681    /*
682     * Mlock lost the isolation race with us. Let try_to_unmap()
683     * move the page to the unevictable list.
684     */
685    if (vm_flags & VM_LOCKED)
686        return PAGEREF_RECLAIM;
687
688    if (referenced_ptes) {
689        if (PageSwapBacked(page))
690            return PAGEREF_ACTIVATE;
691        /*
692         * All mapped pages start out with page table
693         * references from the instantiating fault, so we need
694         * to look twice if a mapped file page is used more
695         * than once.
696         *
697         * Mark it and spare it for another trip around the
698         * inactive list. Another page table reference will
699         * lead to its activation.
700         *
701         * Note: the mark is set for activated pages as well
702         * so that recently deactivated but used pages are
703         * quickly recovered.
704         */
705        SetPageReferenced(page);
706
707        if (referenced_page || referenced_ptes > 1)
708            return PAGEREF_ACTIVATE;
709
710        /*
711         * Activate file-backed executable pages after first usage.
712         */
713        if (vm_flags & VM_EXEC)
714            return PAGEREF_ACTIVATE;
715
716        return PAGEREF_KEEP;
717    }
718
719    /* Reclaim if clean, defer dirty pages to writeback */
720    if (referenced_page && !PageSwapBacked(page))
721        return PAGEREF_RECLAIM_CLEAN;
722
723    return PAGEREF_RECLAIM;
724}
725
726/* Check if a page is dirty or under writeback */
727static void page_check_dirty_writeback(struct page *page,
728                       bool *dirty, bool *writeback)
729{
730    struct address_space *mapping;
731
732    /*
733     * Anonymous pages are not handled by flushers and must be written
734     * from reclaim context. Do not stall reclaim based on them
735     */
736    if (!page_is_file_cache(page)) {
737        *dirty = false;
738        *writeback = false;
739        return;
740    }
741
742    /* By default assume that the page flags are accurate */
743    *dirty = PageDirty(page);
744    *writeback = PageWriteback(page);
745
746    /* Verify dirty/writeback state if the filesystem supports it */
747    if (!page_has_private(page))
748        return;
749
750    mapping = page_mapping(page);
751    if (mapping && mapping->a_ops->is_dirty_writeback)
752        mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
753}
754
755/*
756 * shrink_page_list() returns the number of reclaimed pages
757 */
758static unsigned long shrink_page_list(struct list_head *page_list,
759                      struct zone *zone,
760                      struct scan_control *sc,
761                      enum ttu_flags ttu_flags,
762                      unsigned long *ret_nr_dirty,
763                      unsigned long *ret_nr_unqueued_dirty,
764                      unsigned long *ret_nr_congested,
765                      unsigned long *ret_nr_writeback,
766                      unsigned long *ret_nr_immediate,
767                      bool force_reclaim)
768{
769    LIST_HEAD(ret_pages);
770    LIST_HEAD(free_pages);
771    int pgactivate = 0;
772    unsigned long nr_unqueued_dirty = 0;
773    unsigned long nr_dirty = 0;
774    unsigned long nr_congested = 0;
775    unsigned long nr_reclaimed = 0;
776    unsigned long nr_writeback = 0;
777    unsigned long nr_immediate = 0;
778
779    cond_resched();
780
781    mem_cgroup_uncharge_start();
782    while (!list_empty(page_list)) {
783        struct address_space *mapping;
784        struct page *page;
785        int may_enter_fs;
786        enum page_references references = PAGEREF_RECLAIM_CLEAN;
787        bool dirty, writeback;
788
789        cond_resched();
790
791        page = lru_to_page(page_list);
792        list_del(&page->lru);
793
794        if (!trylock_page(page))
795            goto keep;
796
797        VM_BUG_ON(PageActive(page));
798        VM_BUG_ON(page_zone(page) != zone);
799
800        sc->nr_scanned++;
801
802        if (unlikely(!page_evictable(page)))
803            goto cull_mlocked;
804
805        if (!sc->may_unmap && page_mapped(page))
806            goto keep_locked;
807
808        /* Double the slab pressure for mapped and swapcache pages */
809        if (page_mapped(page) || PageSwapCache(page))
810            sc->nr_scanned++;
811
812        may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
813            (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
814
815        /*
816         * The number of dirty pages determines if a zone is marked
817         * reclaim_congested which affects wait_iff_congested. kswapd
818         * will stall and start writing pages if the tail of the LRU
819         * is all dirty unqueued pages.
820         */
821        page_check_dirty_writeback(page, &dirty, &writeback);
822        if (dirty || writeback)
823            nr_dirty++;
824
825        if (dirty && !writeback)
826            nr_unqueued_dirty++;
827
828        /*
829         * Treat this page as congested if the underlying BDI is or if
830         * pages are cycling through the LRU so quickly that the
831         * pages marked for immediate reclaim are making it to the
832         * end of the LRU a second time.
833         */
834        mapping = page_mapping(page);
835        if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
836            (writeback && PageReclaim(page)))
837            nr_congested++;
838
839        /*
840         * If a page at the tail of the LRU is under writeback, there
841         * are three cases to consider.
842         *
843         * 1) If reclaim is encountering an excessive number of pages
844         * under writeback and this page is both under writeback and
845         * PageReclaim then it indicates that pages are being queued
846         * for IO but are being recycled through the LRU before the
847         * IO can complete. Waiting on the page itself risks an
848         * indefinite stall if it is impossible to writeback the
849         * page due to IO error or disconnected storage so instead
850         * note that the LRU is being scanned too quickly and the
851         * caller can stall after page list has been processed.
852         *
853         * 2) Global reclaim encounters a page, memcg encounters a
854         * page that is not marked for immediate reclaim or
855         * the caller does not have __GFP_IO. In this case mark
856         * the page for immediate reclaim and continue scanning.
857         *
858         * __GFP_IO is checked because a loop driver thread might
859         * enter reclaim, and deadlock if it waits on a page for
860         * which it is needed to do the write (loop masks off
861         * __GFP_IO|__GFP_FS for this reason); but more thought
862         * would probably show more reasons.
863         *
864         * Don't require __GFP_FS, since we're not going into the
865         * FS, just waiting on its writeback completion. Worryingly,
866         * ext4 gfs2 and xfs allocate pages with
867         * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
868         * may_enter_fs here is liable to OOM on them.
869         *
870         * 3) memcg encounters a page that is not already marked
871         * PageReclaim. memcg does not have any dirty pages
872         * throttling so we could easily OOM just because too many
873         * pages are in writeback and there is nothing else to
874         * reclaim. Wait for the writeback to complete.
875         */
876        if (PageWriteback(page)) {
877            /* Case 1 above */
878            if (current_is_kswapd() &&
879                PageReclaim(page) &&
880                zone_is_reclaim_writeback(zone)) {
881                nr_immediate++;
882                goto keep_locked;
883
884            /* Case 2 above */
885            } else if (global_reclaim(sc) ||
886                !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
887                /*
888                 * This is slightly racy - end_page_writeback()
889                 * might have just cleared PageReclaim, then
890                 * setting PageReclaim here end up interpreted
891                 * as PageReadahead - but that does not matter
892                 * enough to care. What we do want is for this
893                 * page to have PageReclaim set next time memcg
894                 * reclaim reaches the tests above, so it will
895                 * then wait_on_page_writeback() to avoid OOM;
896                 * and it's also appropriate in global reclaim.
897                 */
898                SetPageReclaim(page);
899                nr_writeback++;
900
901                goto keep_locked;
902
903            /* Case 3 above */
904            } else {
905                wait_on_page_writeback(page);
906            }
907        }
908
909        if (!force_reclaim)
910            references = page_check_references(page, sc);
911
912        switch (references) {
913        case PAGEREF_ACTIVATE:
914            goto activate_locked;
915        case PAGEREF_KEEP:
916            goto keep_locked;
917        case PAGEREF_RECLAIM:
918        case PAGEREF_RECLAIM_CLEAN:
919            ; /* try to reclaim the page below */
920        }
921
922        /*
923         * Anonymous process memory has backing store?
924         * Try to allocate it some swap space here.
925         */
926        if (PageAnon(page) && !PageSwapCache(page)) {
927            if (!(sc->gfp_mask & __GFP_IO))
928                goto keep_locked;
929            if (!add_to_swap(page, page_list))
930                goto activate_locked;
931            may_enter_fs = 1;
932
933            /* Adding to swap updated mapping */
934            mapping = page_mapping(page);
935        }
936
937        /*
938         * The page is mapped into the page tables of one or more
939         * processes. Try to unmap it here.
940         */
941        if (page_mapped(page) && mapping) {
942            switch (try_to_unmap(page, ttu_flags)) {
943            case SWAP_FAIL:
944                goto activate_locked;
945            case SWAP_AGAIN:
946                goto keep_locked;
947            case SWAP_MLOCK:
948                goto cull_mlocked;
949            case SWAP_SUCCESS:
950                ; /* try to free the page below */
951            }
952        }
953
954        if (PageDirty(page)) {
955            /*
956             * Only kswapd can writeback filesystem pages to
957             * avoid risk of stack overflow but only writeback
958             * if many dirty pages have been encountered.
959             */
960            if (page_is_file_cache(page) &&
961                    (!current_is_kswapd() ||
962                     !zone_is_reclaim_dirty(zone))) {
963                /*
964                 * Immediately reclaim when written back.
965                 * Similar in principal to deactivate_page()
966                 * except we already have the page isolated
967                 * and know it's dirty
968                 */
969                inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
970                SetPageReclaim(page);
971
972                goto keep_locked;
973            }
974
975            if (references == PAGEREF_RECLAIM_CLEAN)
976                goto keep_locked;
977            if (!may_enter_fs)
978                goto keep_locked;
979            if (!sc->may_writepage)
980                goto keep_locked;
981
982            /* Page is dirty, try to write it out here */
983            switch (pageout(page, mapping, sc)) {
984            case PAGE_KEEP:
985                goto keep_locked;
986            case PAGE_ACTIVATE:
987                goto activate_locked;
988            case PAGE_SUCCESS:
989                if (PageWriteback(page))
990                    goto keep;
991                if (PageDirty(page))
992                    goto keep;
993
994                /*
995                 * A synchronous write - probably a ramdisk. Go
996                 * ahead and try to reclaim the page.
997                 */
998                if (!trylock_page(page))
999                    goto keep;
1000                if (PageDirty(page) || PageWriteback(page))
1001                    goto keep_locked;
1002                mapping = page_mapping(page);
1003            case PAGE_CLEAN:
1004                ; /* try to free the page below */
1005            }
1006        }
1007
1008        /*
1009         * If the page has buffers, try to free the buffer mappings
1010         * associated with this page. If we succeed we try to free
1011         * the page as well.
1012         *
1013         * We do this even if the page is PageDirty().
1014         * try_to_release_page() does not perform I/O, but it is
1015         * possible for a page to have PageDirty set, but it is actually
1016         * clean (all its buffers are clean). This happens if the
1017         * buffers were written out directly, with submit_bh(). ext3
1018         * will do this, as well as the blockdev mapping.
1019         * try_to_release_page() will discover that cleanness and will
1020         * drop the buffers and mark the page clean - it can be freed.
1021         *
1022         * Rarely, pages can have buffers and no ->mapping. These are
1023         * the pages which were not successfully invalidated in
1024         * truncate_complete_page(). We try to drop those buffers here
1025         * and if that worked, and the page is no longer mapped into
1026         * process address space (page_count == 1) it can be freed.
1027         * Otherwise, leave the page on the LRU so it is swappable.
1028         */
1029        if (page_has_private(page)) {
1030            if (!try_to_release_page(page, sc->gfp_mask))
1031                goto activate_locked;
1032            if (!mapping && page_count(page) == 1) {
1033                unlock_page(page);
1034                if (put_page_testzero(page))
1035                    goto free_it;
1036                else {
1037                    /*
1038                     * rare race with speculative reference.
1039                     * the speculative reference will free
1040                     * this page shortly, so we may
1041                     * increment nr_reclaimed here (and
1042                     * leave it off the LRU).
1043                     */
1044                    nr_reclaimed++;
1045                    continue;
1046                }
1047            }
1048        }
1049
1050        if (!mapping || !__remove_mapping(mapping, page))
1051            goto keep_locked;
1052
1053        /*
1054         * At this point, we have no other references and there is
1055         * no way to pick any more up (removed from LRU, removed
1056         * from pagecache). Can use non-atomic bitops now (and
1057         * we obviously don't have to worry about waking up a process
1058         * waiting on the page lock, because there are no references.
1059         */
1060        __clear_page_locked(page);
1061free_it:
1062        nr_reclaimed++;
1063
1064        /*
1065         * Is there need to periodically free_page_list? It would
1066         * appear not as the counts should be low
1067         */
1068        list_add(&page->lru, &free_pages);
1069        continue;
1070
1071cull_mlocked:
1072        if (PageSwapCache(page))
1073            try_to_free_swap(page);
1074        unlock_page(page);
1075        putback_lru_page(page);
1076        continue;
1077
1078activate_locked:
1079        /* Not a candidate for swapping, so reclaim swap space. */
1080        if (PageSwapCache(page) && vm_swap_full())
1081            try_to_free_swap(page);
1082        VM_BUG_ON(PageActive(page));
1083        SetPageActive(page);
1084        pgactivate++;
1085keep_locked:
1086        unlock_page(page);
1087keep:
1088        list_add(&page->lru, &ret_pages);
1089        VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
1090    }
1091
1092    free_hot_cold_page_list(&free_pages, 1);
1093
1094    list_splice(&ret_pages, page_list);
1095    count_vm_events(PGACTIVATE, pgactivate);
1096    mem_cgroup_uncharge_end();
1097    *ret_nr_dirty += nr_dirty;
1098    *ret_nr_congested += nr_congested;
1099    *ret_nr_unqueued_dirty += nr_unqueued_dirty;
1100    *ret_nr_writeback += nr_writeback;
1101    *ret_nr_immediate += nr_immediate;
1102    return nr_reclaimed;
1103}
1104
1105unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1106                        struct list_head *page_list)
1107{
1108    struct scan_control sc = {
1109        .gfp_mask = GFP_KERNEL,
1110        .priority = DEF_PRIORITY,
1111        .may_unmap = 1,
1112    };
1113    unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
1114    struct page *page, *next;
1115    LIST_HEAD(clean_pages);
1116
1117    list_for_each_entry_safe(page, next, page_list, lru) {
1118        if (page_is_file_cache(page) && !PageDirty(page) &&
1119            !isolated_balloon_page(page)) {
1120            ClearPageActive(page);
1121            list_move(&page->lru, &clean_pages);
1122        }
1123    }
1124
1125    ret = shrink_page_list(&clean_pages, zone, &sc,
1126            TTU_UNMAP|TTU_IGNORE_ACCESS,
1127            &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
1128    list_splice(&clean_pages, page_list);
1129    __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
1130    return ret;
1131}
1132
1133/*
1134 * Attempt to remove the specified page from its LRU. Only take this page
1135 * if it is of the appropriate PageActive status. Pages which are being
1136 * freed elsewhere are also ignored.
1137 *
1138 * page: page to consider
1139 * mode: one of the LRU isolation modes defined above
1140 *
1141 * returns 0 on success, -ve errno on failure.
1142 */
1143int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1144{
1145    int ret = -EINVAL;
1146
1147    /* Only take pages on the LRU. */
1148    if (!PageLRU(page))
1149        return ret;
1150
1151    /* Compaction should not handle unevictable pages but CMA can do so */
1152    if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1153        return ret;
1154
1155    ret = -EBUSY;
1156
1157    /*
1158     * To minimise LRU disruption, the caller can indicate that it only
1159     * wants to isolate pages it will be able to operate on without
1160     * blocking - clean pages for the most part.
1161     *
1162     * ISOLATE_CLEAN means that only clean pages should be isolated. This
1163     * is used by reclaim when it is cannot write to backing storage
1164     *
1165     * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
1166     * that it is possible to migrate without blocking
1167     */
1168    if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1169        /* All the caller can do on PageWriteback is block */
1170        if (PageWriteback(page))
1171            return ret;
1172
1173        if (PageDirty(page)) {
1174            struct address_space *mapping;
1175
1176            /* ISOLATE_CLEAN means only clean pages */
1177            if (mode & ISOLATE_CLEAN)
1178                return ret;
1179
1180            /*
1181             * Only pages without mappings or that have a
1182             * ->migratepage callback are possible to migrate
1183             * without blocking
1184             */
1185            mapping = page_mapping(page);
1186            if (mapping && !mapping->a_ops->migratepage)
1187                return ret;
1188        }
1189    }
1190
1191    if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1192        return ret;
1193
1194    if (likely(get_page_unless_zero(page))) {
1195        /*
1196         * Be careful not to clear PageLRU until after we're
1197         * sure the page is not being freed elsewhere -- the
1198         * page release code relies on it.
1199         */
1200        ClearPageLRU(page);
1201        ret = 0;
1202    }
1203
1204    return ret;
1205}
1206
1207/*
1208 * zone->lru_lock is heavily contended. Some of the functions that
1209 * shrink the lists perform better by taking out a batch of pages
1210 * and working on them outside the LRU lock.
1211 *
1212 * For pagecache intensive workloads, this function is the hottest
1213 * spot in the kernel (apart from copy_*_user functions).
1214 *
1215 * Appropriate locks must be held before calling this function.
1216 *
1217 * @nr_to_scan: The number of pages to look through on the list.
1218 * @lruvec: The LRU vector to pull pages from.
1219 * @dst: The temp list to put pages on to.
1220 * @nr_scanned: The number of pages that were scanned.
1221 * @sc: The scan_control struct for this reclaim session
1222 * @mode: One of the LRU isolation modes
1223 * @lru: LRU list id for isolating
1224 *
1225 * returns how many pages were moved onto *@dst.
1226 */
1227static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1228        struct lruvec *lruvec, struct list_head *dst,
1229        unsigned long *nr_scanned, struct scan_control *sc,
1230        isolate_mode_t mode, enum lru_list lru)
1231{
1232    struct list_head *src = &lruvec->lists[lru];
1233    unsigned long nr_taken = 0;
1234    unsigned long scan;
1235
1236    for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1237        struct page *page;
1238        int nr_pages;
1239
1240        page = lru_to_page(src);
1241        prefetchw_prev_lru_page(page, src, flags);
1242
1243        VM_BUG_ON(!PageLRU(page));
1244
1245        switch (__isolate_lru_page(page, mode)) {
1246        case 0:
1247            nr_pages = hpage_nr_pages(page);
1248            mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
1249            list_move(&page->lru, dst);
1250            nr_taken += nr_pages;
1251            break;
1252
1253        case -EBUSY:
1254            /* else it is being freed elsewhere */
1255            list_move(&page->lru, src);
1256            continue;
1257
1258        default:
1259            BUG();
1260        }
1261    }
1262
1263    *nr_scanned = scan;
1264    trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1265                    nr_taken, mode, is_file_lru(lru));
1266    return nr_taken;
1267}
1268
1269/**
1270 * isolate_lru_page - tries to isolate a page from its LRU list
1271 * @page: page to isolate from its LRU list
1272 *
1273 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1274 * vmstat statistic corresponding to whatever LRU list the page was on.
1275 *
1276 * Returns 0 if the page was removed from an LRU list.
1277 * Returns -EBUSY if the page was not on an LRU list.
1278 *
1279 * The returned page will have PageLRU() cleared. If it was found on
1280 * the active list, it will have PageActive set. If it was found on
1281 * the unevictable list, it will have the PageUnevictable bit set. That flag
1282 * may need to be cleared by the caller before letting the page go.
1283 *
1284 * The vmstat statistic corresponding to the list on which the page was
1285 * found will be decremented.
1286 *
1287 * Restrictions:
1288 * (1) Must be called with an elevated refcount on the page. This is a
1289 * fundamentnal difference from isolate_lru_pages (which is called
1290 * without a stable reference).
1291 * (2) the lru_lock must not be held.
1292 * (3) interrupts must be enabled.
1293 */
1294int isolate_lru_page(struct page *page)
1295{
1296    int ret = -EBUSY;
1297
1298    VM_BUG_ON(!page_count(page));
1299
1300    if (PageLRU(page)) {
1301        struct zone *zone = page_zone(page);
1302        struct lruvec *lruvec;
1303
1304        spin_lock_irq(&zone->lru_lock);
1305        lruvec = mem_cgroup_page_lruvec(page, zone);
1306        if (PageLRU(page)) {
1307            int lru = page_lru(page);
1308            get_page(page);
1309            ClearPageLRU(page);
1310            del_page_from_lru_list(page, lruvec, lru);
1311            ret = 0;
1312        }
1313        spin_unlock_irq(&zone->lru_lock);
1314    }
1315    return ret;
1316}
1317
1318/*
1319 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1320 * then get resheduled. When there are massive number of tasks doing page
1321 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1322 * the LRU list will go small and be scanned faster than necessary, leading to
1323 * unnecessary swapping, thrashing and OOM.
1324 */
1325static int too_many_isolated(struct zone *zone, int file,
1326        struct scan_control *sc)
1327{
1328    unsigned long inactive, isolated;
1329
1330    if (current_is_kswapd())
1331        return 0;
1332
1333    if (!global_reclaim(sc))
1334        return 0;
1335
1336    if (file) {
1337        inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1338        isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1339    } else {
1340        inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1341        isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1342    }
1343
1344    /*
1345     * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1346     * won't get blocked by normal direct-reclaimers, forming a circular
1347     * deadlock.
1348     */
1349    if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
1350        inactive >>= 3;
1351
1352    return isolated > inactive;
1353}
1354
1355static noinline_for_stack void
1356putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1357{
1358    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1359    struct zone *zone = lruvec_zone(lruvec);
1360    LIST_HEAD(pages_to_free);
1361
1362    /*
1363     * Put back any unfreeable pages.
1364     */
1365    while (!list_empty(page_list)) {
1366        struct page *page = lru_to_page(page_list);
1367        int lru;
1368
1369        VM_BUG_ON(PageLRU(page));
1370        list_del(&page->lru);
1371        if (unlikely(!page_evictable(page))) {
1372            spin_unlock_irq(&zone->lru_lock);
1373            putback_lru_page(page);
1374            spin_lock_irq(&zone->lru_lock);
1375            continue;
1376        }
1377
1378        lruvec = mem_cgroup_page_lruvec(page, zone);
1379
1380        SetPageLRU(page);
1381        lru = page_lru(page);
1382        add_page_to_lru_list(page, lruvec, lru);
1383
1384        if (is_active_lru(lru)) {
1385            int file = is_file_lru(lru);
1386            int numpages = hpage_nr_pages(page);
1387            reclaim_stat->recent_rotated[file] += numpages;
1388        }
1389        if (put_page_testzero(page)) {
1390            __ClearPageLRU(page);
1391            __ClearPageActive(page);
1392            del_page_from_lru_list(page, lruvec, lru);
1393
1394            if (unlikely(PageCompound(page))) {
1395                spin_unlock_irq(&zone->lru_lock);
1396                (*get_compound_page_dtor(page))(page);
1397                spin_lock_irq(&zone->lru_lock);
1398            } else
1399                list_add(&page->lru, &pages_to_free);
1400        }
1401    }
1402
1403    /*
1404     * To save our caller's stack, now use input list for pages to free.
1405     */
1406    list_splice(&pages_to_free, page_list);
1407}
1408
1409/*
1410 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1411 * of reclaimed pages
1412 */
1413static noinline_for_stack unsigned long
1414shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1415             struct scan_control *sc, enum lru_list lru)
1416{
1417    LIST_HEAD(page_list);
1418    unsigned long nr_scanned;
1419    unsigned long nr_reclaimed = 0;
1420    unsigned long nr_taken;
1421    unsigned long nr_dirty = 0;
1422    unsigned long nr_congested = 0;
1423    unsigned long nr_unqueued_dirty = 0;
1424    unsigned long nr_writeback = 0;
1425    unsigned long nr_immediate = 0;
1426    isolate_mode_t isolate_mode = 0;
1427    int file = is_file_lru(lru);
1428    struct zone *zone = lruvec_zone(lruvec);
1429    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1430
1431    while (unlikely(too_many_isolated(zone, file, sc))) {
1432        congestion_wait(BLK_RW_ASYNC, HZ/10);
1433
1434        /* We are about to die and free our memory. Return now. */
1435        if (fatal_signal_pending(current))
1436            return SWAP_CLUSTER_MAX;
1437    }
1438
1439    lru_add_drain();
1440
1441    if (!sc->may_unmap)
1442        isolate_mode |= ISOLATE_UNMAPPED;
1443    if (!sc->may_writepage)
1444        isolate_mode |= ISOLATE_CLEAN;
1445
1446    spin_lock_irq(&zone->lru_lock);
1447
1448    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1449                     &nr_scanned, sc, isolate_mode, lru);
1450
1451    __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1452    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1453
1454    if (global_reclaim(sc)) {
1455        zone->pages_scanned += nr_scanned;
1456        if (current_is_kswapd())
1457            __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1458        else
1459            __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1460    }
1461    spin_unlock_irq(&zone->lru_lock);
1462
1463    if (nr_taken == 0)
1464        return 0;
1465
1466    nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
1467                &nr_dirty, &nr_unqueued_dirty, &nr_congested,
1468                &nr_writeback, &nr_immediate,
1469                false);
1470
1471    spin_lock_irq(&zone->lru_lock);
1472
1473    reclaim_stat->recent_scanned[file] += nr_taken;
1474
1475    if (global_reclaim(sc)) {
1476        if (current_is_kswapd())
1477            __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1478                           nr_reclaimed);
1479        else
1480            __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1481                           nr_reclaimed);
1482    }
1483
1484    putback_inactive_pages(lruvec, &page_list);
1485
1486    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1487
1488    spin_unlock_irq(&zone->lru_lock);
1489
1490    free_hot_cold_page_list(&page_list, 1);
1491
1492    /*
1493     * If reclaim is isolating dirty pages under writeback, it implies
1494     * that the long-lived page allocation rate is exceeding the page
1495     * laundering rate. Either the global limits are not being effective
1496     * at throttling processes due to the page distribution throughout
1497     * zones or there is heavy usage of a slow backing device. The
1498     * only option is to throttle from reclaim context which is not ideal
1499     * as there is no guarantee the dirtying process is throttled in the
1500     * same way balance_dirty_pages() manages.
1501     *
1502     * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
1503     * of pages under pages flagged for immediate reclaim and stall if any
1504     * are encountered in the nr_immediate check below.
1505     */
1506    if (nr_writeback && nr_writeback == nr_taken)
1507        zone_set_flag(zone, ZONE_WRITEBACK);
1508
1509    /*
1510     * memcg will stall in page writeback so only consider forcibly
1511     * stalling for global reclaim
1512     */
1513    if (global_reclaim(sc)) {
1514        /*
1515         * Tag a zone as congested if all the dirty pages scanned were
1516         * backed by a congested BDI and wait_iff_congested will stall.
1517         */
1518        if (nr_dirty && nr_dirty == nr_congested)
1519            zone_set_flag(zone, ZONE_CONGESTED);
1520
1521        /*
1522         * If dirty pages are scanned that are not queued for IO, it
1523         * implies that flushers are not keeping up. In this case, flag
1524         * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
1525         * pages from reclaim context. It will forcibly stall in the
1526         * next check.
1527         */
1528        if (nr_unqueued_dirty == nr_taken)
1529            zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
1530
1531        /*
1532         * In addition, if kswapd scans pages marked marked for
1533         * immediate reclaim and under writeback (nr_immediate), it
1534         * implies that pages are cycling through the LRU faster than
1535         * they are written so also forcibly stall.
1536         */
1537        if (nr_unqueued_dirty == nr_taken || nr_immediate)
1538            congestion_wait(BLK_RW_ASYNC, HZ/10);
1539    }
1540
1541    /*
1542     * Stall direct reclaim for IO completions if underlying BDIs or zone
1543     * is congested. Allow kswapd to continue until it starts encountering
1544     * unqueued dirty pages or cycling through the LRU too quickly.
1545     */
1546    if (!sc->hibernation_mode && !current_is_kswapd())
1547        wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1548
1549    trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1550        zone_idx(zone),
1551        nr_scanned, nr_reclaimed,
1552        sc->priority,
1553        trace_shrink_flags(file));
1554    return nr_reclaimed;
1555}
1556
1557/*
1558 * This moves pages from the active list to the inactive list.
1559 *
1560 * We move them the other way if the page is referenced by one or more
1561 * processes, from rmap.
1562 *
1563 * If the pages are mostly unmapped, the processing is fast and it is
1564 * appropriate to hold zone->lru_lock across the whole operation. But if
1565 * the pages are mapped, the processing is slow (page_referenced()) so we
1566 * should drop zone->lru_lock around each page. It's impossible to balance
1567 * this, so instead we remove the pages from the LRU while processing them.
1568 * It is safe to rely on PG_active against the non-LRU pages in here because
1569 * nobody will play with that bit on a non-LRU page.
1570 *
1571 * The downside is that we have to touch page->_count against each page.
1572 * But we had to alter page->flags anyway.
1573 */
1574
1575static void move_active_pages_to_lru(struct lruvec *lruvec,
1576                     struct list_head *list,
1577                     struct list_head *pages_to_free,
1578                     enum lru_list lru)
1579{
1580    struct zone *zone = lruvec_zone(lruvec);
1581    unsigned long pgmoved = 0;
1582    struct page *page;
1583    int nr_pages;
1584
1585    while (!list_empty(list)) {
1586        page = lru_to_page(list);
1587        lruvec = mem_cgroup_page_lruvec(page, zone);
1588
1589        VM_BUG_ON(PageLRU(page));
1590        SetPageLRU(page);
1591
1592        nr_pages = hpage_nr_pages(page);
1593        mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
1594        list_move(&page->lru, &lruvec->lists[lru]);
1595        pgmoved += nr_pages;
1596
1597        if (put_page_testzero(page)) {
1598            __ClearPageLRU(page);
1599            __ClearPageActive(page);
1600            del_page_from_lru_list(page, lruvec, lru);
1601
1602            if (unlikely(PageCompound(page))) {
1603                spin_unlock_irq(&zone->lru_lock);
1604                (*get_compound_page_dtor(page))(page);
1605                spin_lock_irq(&zone->lru_lock);
1606            } else
1607                list_add(&page->lru, pages_to_free);
1608        }
1609    }
1610    __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1611    if (!is_active_lru(lru))
1612        __count_vm_events(PGDEACTIVATE, pgmoved);
1613}
1614
1615static void shrink_active_list(unsigned long nr_to_scan,
1616                   struct lruvec *lruvec,
1617                   struct scan_control *sc,
1618                   enum lru_list lru)
1619{
1620    unsigned long nr_taken;
1621    unsigned long nr_scanned;
1622    unsigned long vm_flags;
1623    LIST_HEAD(l_hold); /* The pages which were snipped off */
1624    LIST_HEAD(l_active);
1625    LIST_HEAD(l_inactive);
1626    struct page *page;
1627    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1628    unsigned long nr_rotated = 0;
1629    isolate_mode_t isolate_mode = 0;
1630    int file = is_file_lru(lru);
1631    struct zone *zone = lruvec_zone(lruvec);
1632
1633    lru_add_drain();
1634
1635    if (!sc->may_unmap)
1636        isolate_mode |= ISOLATE_UNMAPPED;
1637    if (!sc->may_writepage)
1638        isolate_mode |= ISOLATE_CLEAN;
1639
1640    spin_lock_irq(&zone->lru_lock);
1641
1642    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1643                     &nr_scanned, sc, isolate_mode, lru);
1644    if (global_reclaim(sc))
1645        zone->pages_scanned += nr_scanned;
1646
1647    reclaim_stat->recent_scanned[file] += nr_taken;
1648
1649    __count_zone_vm_events(PGREFILL, zone, nr_scanned);
1650    __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1651    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1652    spin_unlock_irq(&zone->lru_lock);
1653
1654    while (!list_empty(&l_hold)) {
1655        cond_resched();
1656        page = lru_to_page(&l_hold);
1657        list_del(&page->lru);
1658
1659        if (unlikely(!page_evictable(page))) {
1660            putback_lru_page(page);
1661            continue;
1662        }
1663
1664        if (unlikely(buffer_heads_over_limit)) {
1665            if (page_has_private(page) && trylock_page(page)) {
1666                if (page_has_private(page))
1667                    try_to_release_page(page, 0);
1668                unlock_page(page);
1669            }
1670        }
1671
1672        if (page_referenced(page, 0, sc->target_mem_cgroup,
1673                    &vm_flags)) {
1674            nr_rotated += hpage_nr_pages(page);
1675            /*
1676             * Identify referenced, file-backed active pages and
1677             * give them one more trip around the active list. So
1678             * that executable code get better chances to stay in
1679             * memory under moderate memory pressure. Anon pages
1680             * are not likely to be evicted by use-once streaming
1681             * IO, plus JVM can create lots of anon VM_EXEC pages,
1682             * so we ignore them here.
1683             */
1684            if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1685                list_add(&page->lru, &l_active);
1686                continue;
1687            }
1688        }
1689
1690        ClearPageActive(page); /* we are de-activating */
1691        list_add(&page->lru, &l_inactive);
1692    }
1693
1694    /*
1695     * Move pages back to the lru list.
1696     */
1697    spin_lock_irq(&zone->lru_lock);
1698    /*
1699     * Count referenced pages from currently used mappings as rotated,
1700     * even though only some of them are actually re-activated. This
1701     * helps balance scan pressure between file and anonymous pages in
1702     * get_scan_ratio.
1703     */
1704    reclaim_stat->recent_rotated[file] += nr_rotated;
1705
1706    move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1707    move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1708    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1709    spin_unlock_irq(&zone->lru_lock);
1710
1711    free_hot_cold_page_list(&l_hold, 1);
1712}
1713
1714#ifdef CONFIG_SWAP
1715static int inactive_anon_is_low_global(struct zone *zone)
1716{
1717    unsigned long active, inactive;
1718
1719    active = zone_page_state(zone, NR_ACTIVE_ANON);
1720    inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1721
1722    if (inactive * zone->inactive_ratio < active)
1723        return 1;
1724
1725    return 0;
1726}
1727
1728/**
1729 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1730 * @lruvec: LRU vector to check
1731 *
1732 * Returns true if the zone does not have enough inactive anon pages,
1733 * meaning some active anon pages need to be deactivated.
1734 */
1735static int inactive_anon_is_low(struct lruvec *lruvec)
1736{
1737    /*
1738     * If we don't have swap space, anonymous page deactivation
1739     * is pointless.
1740     */
1741    if (!total_swap_pages)
1742        return 0;
1743
1744    if (!mem_cgroup_disabled())
1745        return mem_cgroup_inactive_anon_is_low(lruvec);
1746
1747    return inactive_anon_is_low_global(lruvec_zone(lruvec));
1748}
1749#else
1750static inline int inactive_anon_is_low(struct lruvec *lruvec)
1751{
1752    return 0;
1753}
1754#endif
1755
1756/**
1757 * inactive_file_is_low - check if file pages need to be deactivated
1758 * @lruvec: LRU vector to check
1759 *
1760 * When the system is doing streaming IO, memory pressure here
1761 * ensures that active file pages get deactivated, until more
1762 * than half of the file pages are on the inactive list.
1763 *
1764 * Once we get to that situation, protect the system's working
1765 * set from being evicted by disabling active file page aging.
1766 *
1767 * This uses a different ratio than the anonymous pages, because
1768 * the page cache uses a use-once replacement algorithm.
1769 */
1770static int inactive_file_is_low(struct lruvec *lruvec)
1771{
1772    unsigned long inactive;
1773    unsigned long active;
1774
1775    inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1776    active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
1777
1778    return active > inactive;
1779}
1780
1781static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1782{
1783    if (is_file_lru(lru))
1784        return inactive_file_is_low(lruvec);
1785    else
1786        return inactive_anon_is_low(lruvec);
1787}
1788
1789static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1790                 struct lruvec *lruvec, struct scan_control *sc)
1791{
1792    if (is_active_lru(lru)) {
1793        if (inactive_list_is_low(lruvec, lru))
1794            shrink_active_list(nr_to_scan, lruvec, sc, lru);
1795        return 0;
1796    }
1797
1798    return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1799}
1800
1801static int vmscan_swappiness(struct scan_control *sc)
1802{
1803    if (global_reclaim(sc))
1804        return vm_swappiness;
1805    return mem_cgroup_swappiness(sc->target_mem_cgroup);
1806}
1807
1808enum scan_balance {
1809    SCAN_EQUAL,
1810    SCAN_FRACT,
1811    SCAN_ANON,
1812    SCAN_FILE,
1813};
1814
1815/*
1816 * Determine how aggressively the anon and file LRU lists should be
1817 * scanned. The relative value of each set of LRU lists is determined
1818 * by looking at the fraction of the pages scanned we did rotate back
1819 * onto the active list instead of evict.
1820 *
1821 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1822 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
1823 */
1824static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1825               unsigned long *nr)
1826{
1827    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1828    u64 fraction[2];
1829    u64 denominator = 0; /* gcc */
1830    struct zone *zone = lruvec_zone(lruvec);
1831    unsigned long anon_prio, file_prio;
1832    enum scan_balance scan_balance;
1833    unsigned long anon, file, free;
1834    bool force_scan = false;
1835    unsigned long ap, fp;
1836    enum lru_list lru;
1837
1838    /*
1839     * If the zone or memcg is small, nr[l] can be 0. This
1840     * results in no scanning on this priority and a potential
1841     * priority drop. Global direct reclaim can go to the next
1842     * zone and tends to have no problems. Global kswapd is for
1843     * zone balancing and it needs to scan a minimum amount. When
1844     * reclaiming for a memcg, a priority drop can cause high
1845     * latencies, so it's better to scan a minimum amount there as
1846     * well.
1847     */
1848    if (current_is_kswapd() && !zone_reclaimable(zone))
1849        force_scan = true;
1850    if (!global_reclaim(sc))
1851        force_scan = true;
1852
1853    /* If we have no swap space, do not bother scanning anon pages. */
1854    if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
1855        scan_balance = SCAN_FILE;
1856        goto out;
1857    }
1858
1859    /*
1860     * Global reclaim will swap to prevent OOM even with no
1861     * swappiness, but memcg users want to use this knob to
1862     * disable swapping for individual groups completely when
1863     * using the memory controller's swap limit feature would be
1864     * too expensive.
1865     */
1866    if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
1867        scan_balance = SCAN_FILE;
1868        goto out;
1869    }
1870
1871    /*
1872     * Do not apply any pressure balancing cleverness when the
1873     * system is close to OOM, scan both anon and file equally
1874     * (unless the swappiness setting disagrees with swapping).
1875     */
1876    if (!sc->priority && vmscan_swappiness(sc)) {
1877        scan_balance = SCAN_EQUAL;
1878        goto out;
1879    }
1880
1881    anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1882        get_lru_size(lruvec, LRU_INACTIVE_ANON);
1883    file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1884        get_lru_size(lruvec, LRU_INACTIVE_FILE);
1885
1886    /*
1887     * If it's foreseeable that reclaiming the file cache won't be
1888     * enough to get the zone back into a desirable shape, we have
1889     * to swap. Better start now and leave the - probably heavily
1890     * thrashing - remaining file pages alone.
1891     */
1892    if (global_reclaim(sc)) {
1893        free = zone_page_state(zone, NR_FREE_PAGES);
1894        if (unlikely(file + free <= high_wmark_pages(zone))) {
1895            scan_balance = SCAN_ANON;
1896            goto out;
1897        }
1898    }
1899
1900    /*
1901     * There is enough inactive page cache, do not reclaim
1902     * anything from the anonymous working set right now.
1903     */
1904    if (!inactive_file_is_low(lruvec)) {
1905        scan_balance = SCAN_FILE;
1906        goto out;
1907    }
1908
1909    scan_balance = SCAN_FRACT;
1910
1911    /*
1912     * With swappiness at 100, anonymous and file have the same priority.
1913     * This scanning priority is essentially the inverse of IO cost.
1914     */
1915    anon_prio = vmscan_swappiness(sc);
1916    file_prio = 200 - anon_prio;
1917
1918    /*
1919     * OK, so we have swap space and a fair amount of page cache
1920     * pages. We use the recently rotated / recently scanned
1921     * ratios to determine how valuable each cache is.
1922     *
1923     * Because workloads change over time (and to avoid overflow)
1924     * we keep these statistics as a floating average, which ends
1925     * up weighing recent references more than old ones.
1926     *
1927     * anon in [0], file in [1]
1928     */
1929    spin_lock_irq(&zone->lru_lock);
1930    if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1931        reclaim_stat->recent_scanned[0] /= 2;
1932        reclaim_stat->recent_rotated[0] /= 2;
1933    }
1934
1935    if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1936        reclaim_stat->recent_scanned[1] /= 2;
1937        reclaim_stat->recent_rotated[1] /= 2;
1938    }
1939
1940    /*
1941     * The amount of pressure on anon vs file pages is inversely
1942     * proportional to the fraction of recently scanned pages on
1943     * each list that were recently referenced and in active use.
1944     */
1945    ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
1946    ap /= reclaim_stat->recent_rotated[0] + 1;
1947
1948    fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
1949    fp /= reclaim_stat->recent_rotated[1] + 1;
1950    spin_unlock_irq(&zone->lru_lock);
1951
1952    fraction[0] = ap;
1953    fraction[1] = fp;
1954    denominator = ap + fp + 1;
1955out:
1956    for_each_evictable_lru(lru) {
1957        int file = is_file_lru(lru);
1958        unsigned long size;
1959        unsigned long scan;
1960
1961        size = get_lru_size(lruvec, lru);
1962        scan = size >> sc->priority;
1963
1964        if (!scan && force_scan)
1965            scan = min(size, SWAP_CLUSTER_MAX);
1966
1967        switch (scan_balance) {
1968        case SCAN_EQUAL:
1969            /* Scan lists relative to size */
1970            break;
1971        case SCAN_FRACT:
1972            /*
1973             * Scan types proportional to swappiness and
1974             * their relative recent reclaim efficiency.
1975             */
1976            scan = div64_u64(scan * fraction[file], denominator);
1977            break;
1978        case SCAN_FILE:
1979        case SCAN_ANON:
1980            /* Scan one type exclusively */
1981            if ((scan_balance == SCAN_FILE) != file)
1982                scan = 0;
1983            break;
1984        default:
1985            /* Look ma, no brain */
1986            BUG();
1987        }
1988        nr[lru] = scan;
1989    }
1990}
1991
1992/*
1993 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1994 */
1995static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
1996{
1997    unsigned long nr[NR_LRU_LISTS];
1998    unsigned long targets[NR_LRU_LISTS];
1999    unsigned long nr_to_scan;
2000    enum lru_list lru;
2001    unsigned long nr_reclaimed = 0;
2002    unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2003    struct blk_plug plug;
2004    bool scan_adjusted = false;
2005
2006    get_scan_count(lruvec, sc, nr);
2007
2008    /* Record the original scan target for proportional adjustments later */
2009    memcpy(targets, nr, sizeof(nr));
2010
2011    blk_start_plug(&plug);
2012    while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2013                    nr[LRU_INACTIVE_FILE]) {
2014        unsigned long nr_anon, nr_file, percentage;
2015        unsigned long nr_scanned;
2016
2017        for_each_evictable_lru(lru) {
2018            if (nr[lru]) {
2019                nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2020                nr[lru] -= nr_to_scan;
2021
2022                nr_reclaimed += shrink_list(lru, nr_to_scan,
2023                                lruvec, sc);
2024            }
2025        }
2026
2027        if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2028            continue;
2029
2030        /*
2031         * For global direct reclaim, reclaim only the number of pages
2032         * requested. Less care is taken to scan proportionally as it
2033         * is more important to minimise direct reclaim stall latency
2034         * than it is to properly age the LRU lists.
2035         */
2036        if (global_reclaim(sc) && !current_is_kswapd())
2037            break;
2038
2039        /*
2040         * For kswapd and memcg, reclaim at least the number of pages
2041         * requested. Ensure that the anon and file LRUs shrink
2042         * proportionally what was requested by get_scan_count(). We
2043         * stop reclaiming one LRU and reduce the amount scanning
2044         * proportional to the original scan target.
2045         */
2046        nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2047        nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2048
2049        if (nr_file > nr_anon) {
2050            unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2051                        targets[LRU_ACTIVE_ANON] + 1;
2052            lru = LRU_BASE;
2053            percentage = nr_anon * 100 / scan_target;
2054        } else {
2055            unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2056                        targets[LRU_ACTIVE_FILE] + 1;
2057            lru = LRU_FILE;
2058            percentage = nr_file * 100 / scan_target;
2059        }
2060
2061        /* Stop scanning the smaller of the LRU */
2062        nr[lru] = 0;
2063        nr[lru + LRU_ACTIVE] = 0;
2064
2065        /*
2066         * Recalculate the other LRU scan count based on its original
2067         * scan target and the percentage scanning already complete
2068         */
2069        lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2070        nr_scanned = targets[lru] - nr[lru];
2071        nr[lru] = targets[lru] * (100 - percentage) / 100;
2072        nr[lru] -= min(nr[lru], nr_scanned);
2073
2074        lru += LRU_ACTIVE;
2075        nr_scanned = targets[lru] - nr[lru];
2076        nr[lru] = targets[lru] * (100 - percentage) / 100;
2077        nr[lru] -= min(nr[lru], nr_scanned);
2078
2079        scan_adjusted = true;
2080    }
2081    blk_finish_plug(&plug);
2082    sc->nr_reclaimed += nr_reclaimed;
2083
2084    /*
2085     * Even if we did not try to evict anon pages at all, we want to
2086     * rebalance the anon lru active/inactive ratio.
2087     */
2088    if (inactive_anon_is_low(lruvec))
2089        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2090                   sc, LRU_ACTIVE_ANON);
2091
2092    throttle_vm_writeout(sc->gfp_mask);
2093}
2094
2095/* Use reclaim/compaction for costly allocs or under memory pressure */
2096static bool in_reclaim_compaction(struct scan_control *sc)
2097{
2098    if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2099            (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2100             sc->priority < DEF_PRIORITY - 2))
2101        return true;
2102
2103    return false;
2104}
2105
2106/*
2107 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2108 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2109 * true if more pages should be reclaimed such that when the page allocator
2110 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2111 * It will give up earlier than that if there is difficulty reclaiming pages.
2112 */
2113static inline bool should_continue_reclaim(struct zone *zone,
2114                    unsigned long nr_reclaimed,
2115                    unsigned long nr_scanned,
2116                    struct scan_control *sc)
2117{
2118    unsigned long pages_for_compaction;
2119    unsigned long inactive_lru_pages;
2120
2121    /* If not in reclaim/compaction mode, stop */
2122    if (!in_reclaim_compaction(sc))
2123        return false;
2124
2125    /* Consider stopping depending on scan and reclaim activity */
2126    if (sc->gfp_mask & __GFP_REPEAT) {
2127        /*
2128         * For __GFP_REPEAT allocations, stop reclaiming if the
2129         * full LRU list has been scanned and we are still failing
2130         * to reclaim pages. This full LRU scan is potentially
2131         * expensive but a __GFP_REPEAT caller really wants to succeed
2132         */
2133        if (!nr_reclaimed && !nr_scanned)
2134            return false;
2135    } else {
2136        /*
2137         * For non-__GFP_REPEAT allocations which can presumably
2138         * fail without consequence, stop if we failed to reclaim
2139         * any pages from the last SWAP_CLUSTER_MAX number of
2140         * pages that were scanned. This will return to the
2141         * caller faster at the risk reclaim/compaction and
2142         * the resulting allocation attempt fails
2143         */
2144        if (!nr_reclaimed)
2145            return false;
2146    }
2147
2148    /*
2149     * If we have not reclaimed enough pages for compaction and the
2150     * inactive lists are large enough, continue reclaiming
2151     */
2152    pages_for_compaction = (2UL << sc->order);
2153    inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
2154    if (get_nr_swap_pages() > 0)
2155        inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
2156    if (sc->nr_reclaimed < pages_for_compaction &&
2157            inactive_lru_pages > pages_for_compaction)
2158        return true;
2159
2160    /* If compaction would go ahead or the allocation would succeed, stop */
2161    switch (compaction_suitable(zone, sc->order)) {
2162    case COMPACT_PARTIAL:
2163    case COMPACT_CONTINUE:
2164        return false;
2165    default:
2166        return true;
2167    }
2168}
2169
2170static void shrink_zone(struct zone *zone, struct scan_control *sc)
2171{
2172    unsigned long nr_reclaimed, nr_scanned;
2173
2174    do {
2175        struct mem_cgroup *root = sc->target_mem_cgroup;
2176        struct mem_cgroup_reclaim_cookie reclaim = {
2177            .zone = zone,
2178            .priority = sc->priority,
2179        };
2180        struct mem_cgroup *memcg;
2181
2182        nr_reclaimed = sc->nr_reclaimed;
2183        nr_scanned = sc->nr_scanned;
2184
2185        memcg = mem_cgroup_iter(root, NULL, &reclaim);
2186        do {
2187            struct lruvec *lruvec;
2188
2189            lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2190
2191            shrink_lruvec(lruvec, sc);
2192
2193            /*
2194             * Direct reclaim and kswapd have to scan all memory
2195             * cgroups to fulfill the overall scan target for the
2196             * zone.
2197             *
2198             * Limit reclaim, on the other hand, only cares about
2199             * nr_to_reclaim pages to be reclaimed and it will
2200             * retry with decreasing priority if one round over the
2201             * whole hierarchy is not sufficient.
2202             */
2203            if (!global_reclaim(sc) &&
2204                    sc->nr_reclaimed >= sc->nr_to_reclaim) {
2205                mem_cgroup_iter_break(root, memcg);
2206                break;
2207            }
2208            memcg = mem_cgroup_iter(root, memcg, &reclaim);
2209        } while (memcg);
2210
2211        vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2212               sc->nr_scanned - nr_scanned,
2213               sc->nr_reclaimed - nr_reclaimed);
2214
2215    } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2216                     sc->nr_scanned - nr_scanned, sc));
2217}
2218
2219/* Returns true if compaction should go ahead for a high-order request */
2220static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2221{
2222    unsigned long balance_gap, watermark;
2223    bool watermark_ok;
2224
2225    /* Do not consider compaction for orders reclaim is meant to satisfy */
2226    if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
2227        return false;
2228
2229    /*
2230     * Compaction takes time to run and there are potentially other
2231     * callers using the pages just freed. Continue reclaiming until
2232     * there is a buffer of free pages available to give compaction
2233     * a reasonable chance of completing and allocating the page
2234     */
2235    balance_gap = min(low_wmark_pages(zone),
2236        (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2237            KSWAPD_ZONE_BALANCE_GAP_RATIO);
2238    watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
2239    watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
2240
2241    /*
2242     * If compaction is deferred, reclaim up to a point where
2243     * compaction will have a chance of success when re-enabled
2244     */
2245    if (compaction_deferred(zone, sc->order))
2246        return watermark_ok;
2247
2248    /* If compaction is not ready to start, keep reclaiming */
2249    if (!compaction_suitable(zone, sc->order))
2250        return false;
2251
2252    return watermark_ok;
2253}
2254
2255/*
2256 * This is the direct reclaim path, for page-allocating processes. We only
2257 * try to reclaim pages from zones which will satisfy the caller's allocation
2258 * request.
2259 *
2260 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
2261 * Because:
2262 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2263 * allocation or
2264 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
2265 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
2266 * zone defense algorithm.
2267 *
2268 * If a zone is deemed to be full of pinned pages then just give it a light
2269 * scan then give up on it.
2270 *
2271 * This function returns true if a zone is being reclaimed for a costly
2272 * high-order allocation and compaction is ready to begin. This indicates to
2273 * the caller that it should consider retrying the allocation instead of
2274 * further reclaim.
2275 */
2276static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2277{
2278    struct zoneref *z;
2279    struct zone *zone;
2280    unsigned long nr_soft_reclaimed;
2281    unsigned long nr_soft_scanned;
2282    bool aborted_reclaim = false;
2283
2284    /*
2285     * If the number of buffer_heads in the machine exceeds the maximum
2286     * allowed level, force direct reclaim to scan the highmem zone as
2287     * highmem pages could be pinning lowmem pages storing buffer_heads
2288     */
2289    if (buffer_heads_over_limit)
2290        sc->gfp_mask |= __GFP_HIGHMEM;
2291
2292    for_each_zone_zonelist_nodemask(zone, z, zonelist,
2293                    gfp_zone(sc->gfp_mask), sc->nodemask) {
2294        if (!populated_zone(zone))
2295            continue;
2296        /*
2297         * Take care memory controller reclaiming has small influence
2298         * to global LRU.
2299         */
2300        if (global_reclaim(sc)) {
2301            if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2302                continue;
2303            if (sc->priority != DEF_PRIORITY &&
2304                !zone_reclaimable(zone))
2305                continue; /* Let kswapd poll it */
2306            if (IS_ENABLED(CONFIG_COMPACTION)) {
2307                /*
2308                 * If we already have plenty of memory free for
2309                 * compaction in this zone, don't free any more.
2310                 * Even though compaction is invoked for any
2311                 * non-zero order, only frequent costly order
2312                 * reclamation is disruptive enough to become a
2313                 * noticeable problem, like transparent huge
2314                 * page allocations.
2315                 */
2316                if (compaction_ready(zone, sc)) {
2317                    aborted_reclaim = true;
2318                    continue;
2319                }
2320            }
2321            /*
2322             * This steals pages from memory cgroups over softlimit
2323             * and returns the number of reclaimed pages and
2324             * scanned pages. This works for global memory pressure
2325             * and balancing, not for a memcg's limit.
2326             */
2327            nr_soft_scanned = 0;
2328            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2329                        sc->order, sc->gfp_mask,
2330                        &nr_soft_scanned);
2331            sc->nr_reclaimed += nr_soft_reclaimed;
2332            sc->nr_scanned += nr_soft_scanned;
2333            /* need some check for avoid more shrink_zone() */
2334        }
2335
2336        shrink_zone(zone, sc);
2337    }
2338
2339    return aborted_reclaim;
2340}
2341
2342/* All zones in zonelist are unreclaimable? */
2343static bool all_unreclaimable(struct zonelist *zonelist,
2344        struct scan_control *sc)
2345{
2346    struct zoneref *z;
2347    struct zone *zone;
2348
2349    for_each_zone_zonelist_nodemask(zone, z, zonelist,
2350            gfp_zone(sc->gfp_mask), sc->nodemask) {
2351        if (!populated_zone(zone))
2352            continue;
2353        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2354            continue;
2355        if (zone_reclaimable(zone))
2356            return false;
2357    }
2358
2359    return true;
2360}
2361
2362/*
2363 * This is the main entry point to direct page reclaim.
2364 *
2365 * If a full scan of the inactive list fails to free enough memory then we
2366 * are "out of memory" and something needs to be killed.
2367 *
2368 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2369 * high - the zone may be full of dirty or under-writeback pages, which this
2370 * caller can't do much about. We kick the writeback threads and take explicit
2371 * naps in the hope that some of these pages can be written. But if the
2372 * allocating task holds filesystem locks which prevent writeout this might not
2373 * work, and the allocation attempt will fail.
2374 *
2375 * returns: 0, if no pages reclaimed
2376 * else, the number of pages reclaimed
2377 */
2378static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2379                    struct scan_control *sc,
2380                    struct shrink_control *shrink)
2381{
2382    unsigned long total_scanned = 0;
2383    struct reclaim_state *reclaim_state = current->reclaim_state;
2384    struct zoneref *z;
2385    struct zone *zone;
2386    unsigned long writeback_threshold;
2387    bool aborted_reclaim;
2388
2389    delayacct_freepages_start();
2390
2391    if (global_reclaim(sc))
2392        count_vm_event(ALLOCSTALL);
2393
2394    do {
2395        vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2396                sc->priority);
2397        sc->nr_scanned = 0;
2398        aborted_reclaim = shrink_zones(zonelist, sc);
2399
2400        /*
2401         * Don't shrink slabs when reclaiming memory from over limit
2402         * cgroups but do shrink slab at least once when aborting
2403         * reclaim for compaction to avoid unevenly scanning file/anon
2404         * LRU pages over slab pages.
2405         */
2406        if (global_reclaim(sc)) {
2407            unsigned long lru_pages = 0;
2408
2409            nodes_clear(shrink->nodes_to_scan);
2410            for_each_zone_zonelist(zone, z, zonelist,
2411                    gfp_zone(sc->gfp_mask)) {
2412                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2413                    continue;
2414
2415                lru_pages += zone_reclaimable_pages(zone);
2416                node_set(zone_to_nid(zone),
2417                     shrink->nodes_to_scan);
2418            }
2419
2420            shrink_slab(shrink, sc->nr_scanned, lru_pages);
2421            if (reclaim_state) {
2422                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2423                reclaim_state->reclaimed_slab = 0;
2424            }
2425        }
2426        total_scanned += sc->nr_scanned;
2427        if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2428            goto out;
2429
2430        /*
2431         * If we're getting trouble reclaiming, start doing
2432         * writepage even in laptop mode.
2433         */
2434        if (sc->priority < DEF_PRIORITY - 2)
2435            sc->may_writepage = 1;
2436
2437        /*
2438         * Try to write back as many pages as we just scanned. This
2439         * tends to cause slow streaming writers to write data to the
2440         * disk smoothly, at the dirtying rate, which is nice. But
2441         * that's undesirable in laptop mode, where we *want* lumpy
2442         * writeout. So in laptop mode, write out the whole world.
2443         */
2444        writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2445        if (total_scanned > writeback_threshold) {
2446            wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2447                        WB_REASON_TRY_TO_FREE_PAGES);
2448            sc->may_writepage = 1;
2449        }
2450    } while (--sc->priority >= 0 && !aborted_reclaim);
2451
2452out:
2453    delayacct_freepages_end();
2454
2455    if (sc->nr_reclaimed)
2456        return sc->nr_reclaimed;
2457
2458    /*
2459     * As hibernation is going on, kswapd is freezed so that it can't mark
2460     * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
2461     * check.
2462     */
2463    if (oom_killer_disabled)
2464        return 0;
2465
2466    /* Aborted reclaim to try compaction? don't OOM, then */
2467    if (aborted_reclaim)
2468        return 1;
2469
2470    /* top priority shrink_zones still had more to do? don't OOM, then */
2471    if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
2472        return 1;
2473
2474    return 0;
2475}
2476
2477static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2478{
2479    struct zone *zone;
2480    unsigned long pfmemalloc_reserve = 0;
2481    unsigned long free_pages = 0;
2482    int i;
2483    bool wmark_ok;
2484
2485    for (i = 0; i <= ZONE_NORMAL; i++) {
2486        zone = &pgdat->node_zones[i];
2487        pfmemalloc_reserve += min_wmark_pages(zone);
2488        free_pages += zone_page_state(zone, NR_FREE_PAGES);
2489    }
2490
2491    wmark_ok = free_pages > pfmemalloc_reserve / 2;
2492
2493    /* kswapd must be awake if processes are being throttled */
2494    if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2495        pgdat->classzone_idx = min(pgdat->classzone_idx,
2496                        (enum zone_type)ZONE_NORMAL);
2497        wake_up_interruptible(&pgdat->kswapd_wait);
2498    }
2499
2500    return wmark_ok;
2501}
2502
2503/*
2504 * Throttle direct reclaimers if backing storage is backed by the network
2505 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2506 * depleted. kswapd will continue to make progress and wake the processes
2507 * when the low watermark is reached.
2508 *
2509 * Returns true if a fatal signal was delivered during throttling. If this
2510 * happens, the page allocator should not consider triggering the OOM killer.
2511 */
2512static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2513                    nodemask_t *nodemask)
2514{
2515    struct zone *zone;
2516    int high_zoneidx = gfp_zone(gfp_mask);
2517    pg_data_t *pgdat;
2518
2519    /*
2520     * Kernel threads should not be throttled as they may be indirectly
2521     * responsible for cleaning pages necessary for reclaim to make forward
2522     * progress. kjournald for example may enter direct reclaim while
2523     * committing a transaction where throttling it could forcing other
2524     * processes to block on log_wait_commit().
2525     */
2526    if (current->flags & PF_KTHREAD)
2527        goto out;
2528
2529    /*
2530     * If a fatal signal is pending, this process should not throttle.
2531     * It should return quickly so it can exit and free its memory
2532     */
2533    if (fatal_signal_pending(current))
2534        goto out;
2535
2536    /* Check if the pfmemalloc reserves are ok */
2537    first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
2538    pgdat = zone->zone_pgdat;
2539    if (pfmemalloc_watermark_ok(pgdat))
2540        goto out;
2541
2542    /* Account for the throttling */
2543    count_vm_event(PGSCAN_DIRECT_THROTTLE);
2544
2545    /*
2546     * If the caller cannot enter the filesystem, it's possible that it
2547     * is due to the caller holding an FS lock or performing a journal
2548     * transaction in the case of a filesystem like ext[3|4]. In this case,
2549     * it is not safe to block on pfmemalloc_wait as kswapd could be
2550     * blocked waiting on the same lock. Instead, throttle for up to a
2551     * second before continuing.
2552     */
2553    if (!(gfp_mask & __GFP_FS)) {
2554        wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2555            pfmemalloc_watermark_ok(pgdat), HZ);
2556
2557        goto check_pending;
2558    }
2559
2560    /* Throttle until kswapd wakes the process */
2561    wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2562        pfmemalloc_watermark_ok(pgdat));
2563
2564check_pending:
2565    if (fatal_signal_pending(current))
2566        return true;
2567
2568out:
2569    return false;
2570}
2571
2572unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2573                gfp_t gfp_mask, nodemask_t *nodemask)
2574{
2575    unsigned long nr_reclaimed;
2576    struct scan_control sc = {
2577        .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
2578        .may_writepage = !laptop_mode,
2579        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2580        .may_unmap = 1,
2581        .may_swap = 1,
2582        .order = order,
2583        .priority = DEF_PRIORITY,
2584        .target_mem_cgroup = NULL,
2585        .nodemask = nodemask,
2586    };
2587    struct shrink_control shrink = {
2588        .gfp_mask = sc.gfp_mask,
2589    };
2590
2591    /*
2592     * Do not enter reclaim if fatal signal was delivered while throttled.
2593     * 1 is returned so that the page allocator does not OOM kill at this
2594     * point.
2595     */
2596    if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
2597        return 1;
2598
2599    trace_mm_vmscan_direct_reclaim_begin(order,
2600                sc.may_writepage,
2601                gfp_mask);
2602
2603    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2604
2605    trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2606
2607    return nr_reclaimed;
2608}
2609
2610#ifdef CONFIG_MEMCG
2611
2612unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2613                        gfp_t gfp_mask, bool noswap,
2614                        struct zone *zone,
2615                        unsigned long *nr_scanned)
2616{
2617    struct scan_control sc = {
2618        .nr_scanned = 0,
2619        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2620        .may_writepage = !laptop_mode,
2621        .may_unmap = 1,
2622        .may_swap = !noswap,
2623        .order = 0,
2624        .priority = 0,
2625        .target_mem_cgroup = memcg,
2626    };
2627    struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2628
2629    sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2630            (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2631
2632    trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2633                              sc.may_writepage,
2634                              sc.gfp_mask);
2635
2636    /*
2637     * NOTE: Although we can get the priority field, using it
2638     * here is not a good idea, since it limits the pages we can scan.
2639     * if we don't reclaim here, the shrink_zone from balance_pgdat
2640     * will pick up pages from other mem cgroup's as well. We hack
2641     * the priority and make it zero.
2642     */
2643    shrink_lruvec(lruvec, &sc);
2644
2645    trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2646
2647    *nr_scanned = sc.nr_scanned;
2648    return sc.nr_reclaimed;
2649}
2650
2651unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2652                       gfp_t gfp_mask,
2653                       bool noswap)
2654{
2655    struct zonelist *zonelist;
2656    unsigned long nr_reclaimed;
2657    int nid;
2658    struct scan_control sc = {
2659        .may_writepage = !laptop_mode,
2660        .may_unmap = 1,
2661        .may_swap = !noswap,
2662        .nr_to_reclaim = SWAP_CLUSTER_MAX,
2663        .order = 0,
2664        .priority = DEF_PRIORITY,
2665        .target_mem_cgroup = memcg,
2666        .nodemask = NULL, /* we don't care the placement */
2667        .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2668                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2669    };
2670    struct shrink_control shrink = {
2671        .gfp_mask = sc.gfp_mask,
2672    };
2673
2674    /*
2675     * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2676     * take care of from where we get pages. So the node where we start the
2677     * scan does not need to be the current node.
2678     */
2679    nid = mem_cgroup_select_victim_node(memcg);
2680
2681    zonelist = NODE_DATA(nid)->node_zonelists;
2682
2683    trace_mm_vmscan_memcg_reclaim_begin(0,
2684                        sc.may_writepage,
2685                        sc.gfp_mask);
2686
2687    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2688
2689    trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2690
2691    return nr_reclaimed;
2692}
2693#endif
2694
2695static void age_active_anon(struct zone *zone, struct scan_control *sc)
2696{
2697    struct mem_cgroup *memcg;
2698
2699    if (!total_swap_pages)
2700        return;
2701
2702    memcg = mem_cgroup_iter(NULL, NULL, NULL);
2703    do {
2704        struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2705
2706        if (inactive_anon_is_low(lruvec))
2707            shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2708                       sc, LRU_ACTIVE_ANON);
2709
2710        memcg = mem_cgroup_iter(NULL, memcg, NULL);
2711    } while (memcg);
2712}
2713
2714static bool zone_balanced(struct zone *zone, int order,
2715              unsigned long balance_gap, int classzone_idx)
2716{
2717    if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
2718                    balance_gap, classzone_idx, 0))
2719        return false;
2720
2721    if (IS_ENABLED(CONFIG_COMPACTION) && order &&
2722        !compaction_suitable(zone, order))
2723        return false;
2724
2725    return true;
2726}
2727
2728/*
2729 * pgdat_balanced() is used when checking if a node is balanced.
2730 *
2731 * For order-0, all zones must be balanced!
2732 *
2733 * For high-order allocations only zones that meet watermarks and are in a
2734 * zone allowed by the callers classzone_idx are added to balanced_pages. The
2735 * total of balanced pages must be at least 25% of the zones allowed by
2736 * classzone_idx for the node to be considered balanced. Forcing all zones to
2737 * be balanced for high orders can cause excessive reclaim when there are
2738 * imbalanced zones.
2739 * The choice of 25% is due to
2740 * o a 16M DMA zone that is balanced will not balance a zone on any
2741 * reasonable sized machine
2742 * o On all other machines, the top zone must be at least a reasonable
2743 * percentage of the middle zones. For example, on 32-bit x86, highmem
2744 * would need to be at least 256M for it to be balance a whole node.
2745 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2746 * to balance a node on its own. These seemed like reasonable ratios.
2747 */
2748static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
2749{
2750    unsigned long managed_pages = 0;
2751    unsigned long balanced_pages = 0;
2752    int i;
2753
2754    /* Check the watermark levels */
2755    for (i = 0; i <= classzone_idx; i++) {
2756        struct zone *zone = pgdat->node_zones + i;
2757
2758        if (!populated_zone(zone))
2759            continue;
2760
2761        managed_pages += zone->managed_pages;
2762
2763        /*
2764         * A special case here:
2765         *
2766         * balance_pgdat() skips over all_unreclaimable after
2767         * DEF_PRIORITY. Effectively, it considers them balanced so
2768         * they must be considered balanced here as well!
2769         */
2770        if (!zone_reclaimable(zone)) {
2771            balanced_pages += zone->managed_pages;
2772            continue;
2773        }
2774
2775        if (zone_balanced(zone, order, 0, i))
2776            balanced_pages += zone->managed_pages;
2777        else if (!order)
2778            return false;
2779    }
2780
2781    if (order)
2782        return balanced_pages >= (managed_pages >> 2);
2783    else
2784        return true;
2785}
2786
2787/*
2788 * Prepare kswapd for sleeping. This verifies that there are no processes
2789 * waiting in throttle_direct_reclaim() and that watermarks have been met.
2790 *
2791 * Returns true if kswapd is ready to sleep
2792 */
2793static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
2794                    int classzone_idx)
2795{
2796    /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2797    if (remaining)
2798        return false;
2799
2800    /*
2801     * There is a potential race between when kswapd checks its watermarks
2802     * and a process gets throttled. There is also a potential race if
2803     * processes get throttled, kswapd wakes, a large process exits therby
2804     * balancing the zones that causes kswapd to miss a wakeup. If kswapd
2805     * is going to sleep, no process should be sleeping on pfmemalloc_wait
2806     * so wake them now if necessary. If necessary, processes will wake
2807     * kswapd and get throttled again
2808     */
2809    if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
2810        wake_up(&pgdat->pfmemalloc_wait);
2811        return false;
2812    }
2813
2814    return pgdat_balanced(pgdat, order, classzone_idx);
2815}
2816
2817/*
2818 * kswapd shrinks the zone by the number of pages required to reach
2819 * the high watermark.
2820 *
2821 * Returns true if kswapd scanned at least the requested number of pages to
2822 * reclaim or if the lack of progress was due to pages under writeback.
2823 * This is used to determine if the scanning priority needs to be raised.
2824 */
2825static bool kswapd_shrink_zone(struct zone *zone,
2826                   int classzone_idx,
2827                   struct scan_control *sc,
2828                   unsigned long lru_pages,
2829                   unsigned long *nr_attempted)
2830{
2831    int testorder = sc->order;
2832    unsigned long balance_gap;
2833    struct reclaim_state *reclaim_state = current->reclaim_state;
2834    struct shrink_control shrink = {
2835        .gfp_mask = sc->gfp_mask,
2836    };
2837    bool lowmem_pressure;
2838
2839    /* Reclaim above the high watermark. */
2840    sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
2841
2842    /*
2843     * Kswapd reclaims only single pages with compaction enabled. Trying
2844     * too hard to reclaim until contiguous free pages have become
2845     * available can hurt performance by evicting too much useful data
2846     * from memory. Do not reclaim more than needed for compaction.
2847     */
2848    if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2849            compaction_suitable(zone, sc->order) !=
2850                COMPACT_SKIPPED)
2851        testorder = 0;
2852
2853    /*
2854     * We put equal pressure on every zone, unless one zone has way too
2855     * many pages free already. The "too many pages" is defined as the
2856     * high wmark plus a "gap" where the gap is either the low
2857     * watermark or 1% of the zone, whichever is smaller.
2858     */
2859    balance_gap = min(low_wmark_pages(zone),
2860        (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2861        KSWAPD_ZONE_BALANCE_GAP_RATIO);
2862
2863    /*
2864     * If there is no low memory pressure or the zone is balanced then no
2865     * reclaim is necessary
2866     */
2867    lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
2868    if (!lowmem_pressure && zone_balanced(zone, testorder,
2869                        balance_gap, classzone_idx))
2870        return true;
2871
2872    shrink_zone(zone, sc);
2873    nodes_clear(shrink.nodes_to_scan);
2874    node_set(zone_to_nid(zone), shrink.nodes_to_scan);
2875
2876    reclaim_state->reclaimed_slab = 0;
2877    shrink_slab(&shrink, sc->nr_scanned, lru_pages);
2878    sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2879
2880    /* Account for the number of pages attempted to reclaim */
2881    *nr_attempted += sc->nr_to_reclaim;
2882
2883    zone_clear_flag(zone, ZONE_WRITEBACK);
2884
2885    /*
2886     * If a zone reaches its high watermark, consider it to be no longer
2887     * congested. It's possible there are dirty pages backed by congested
2888     * BDIs but as pressure is relieved, speculatively avoid congestion
2889     * waits.
2890     */
2891    if (zone_reclaimable(zone) &&
2892        zone_balanced(zone, testorder, 0, classzone_idx)) {
2893        zone_clear_flag(zone, ZONE_CONGESTED);
2894        zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
2895    }
2896
2897    return sc->nr_scanned >= sc->nr_to_reclaim;
2898}
2899
2900/*
2901 * For kswapd, balance_pgdat() will work across all this node's zones until
2902 * they are all at high_wmark_pages(zone).
2903 *
2904 * Returns the final order kswapd was reclaiming at
2905 *
2906 * There is special handling here for zones which are full of pinned pages.
2907 * This can happen if the pages are all mlocked, or if they are all used by
2908 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2909 * What we do is to detect the case where all pages in the zone have been
2910 * scanned twice and there has been zero successful reclaim. Mark the zone as
2911 * dead and from now on, only perform a short scan. Basically we're polling
2912 * the zone for when the problem goes away.
2913 *
2914 * kswapd scans the zones in the highmem->normal->dma direction. It skips
2915 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2916 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2917 * lower zones regardless of the number of free pages in the lower zones. This
2918 * interoperates with the page allocator fallback scheme to ensure that aging
2919 * of pages is balanced across the zones.
2920 */
2921static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2922                            int *classzone_idx)
2923{
2924    int i;
2925    int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2926    unsigned long nr_soft_reclaimed;
2927    unsigned long nr_soft_scanned;
2928    struct scan_control sc = {
2929        .gfp_mask = GFP_KERNEL,
2930        .priority = DEF_PRIORITY,
2931        .may_unmap = 1,
2932        .may_swap = 1,
2933        .may_writepage = !laptop_mode,
2934        .order = order,
2935        .target_mem_cgroup = NULL,
2936    };
2937    count_vm_event(PAGEOUTRUN);
2938
2939    do {
2940        unsigned long lru_pages = 0;
2941        unsigned long nr_attempted = 0;
2942        bool raise_priority = true;
2943        bool pgdat_needs_compaction = (order > 0);
2944
2945        sc.nr_reclaimed = 0;
2946
2947        /*
2948         * Scan in the highmem->dma direction for the highest
2949         * zone which needs scanning
2950         */
2951        for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2952            struct zone *zone = pgdat->node_zones + i;
2953
2954            if (!populated_zone(zone))
2955                continue;
2956
2957            if (sc.priority != DEF_PRIORITY &&
2958                !zone_reclaimable(zone))
2959                continue;
2960
2961            /*
2962             * Do some background aging of the anon list, to give
2963             * pages a chance to be referenced before reclaiming.
2964             */
2965            age_active_anon(zone, &sc);
2966
2967            /*
2968             * If the number of buffer_heads in the machine
2969             * exceeds the maximum allowed level and this node
2970             * has a highmem zone, force kswapd to reclaim from
2971             * it to relieve lowmem pressure.
2972             */
2973            if (buffer_heads_over_limit && is_highmem_idx(i)) {
2974                end_zone = i;
2975                break;
2976            }
2977
2978            if (!zone_balanced(zone, order, 0, 0)) {
2979                end_zone = i;
2980                break;
2981            } else {
2982                /*
2983                 * If balanced, clear the dirty and congested
2984                 * flags
2985                 */
2986                zone_clear_flag(zone, ZONE_CONGESTED);
2987                zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
2988            }
2989        }
2990
2991        if (i < 0)
2992            goto out;
2993
2994        for (i = 0; i <= end_zone; i++) {
2995            struct zone *zone = pgdat->node_zones + i;
2996
2997            if (!populated_zone(zone))
2998                continue;
2999
3000            lru_pages += zone_reclaimable_pages(zone);
3001
3002            /*
3003             * If any zone is currently balanced then kswapd will
3004             * not call compaction as it is expected that the
3005             * necessary pages are already available.
3006             */
3007            if (pgdat_needs_compaction &&
3008                    zone_watermark_ok(zone, order,
3009                        low_wmark_pages(zone),
3010                        *classzone_idx, 0))
3011                pgdat_needs_compaction = false;
3012        }
3013
3014        /*
3015         * If we're getting trouble reclaiming, start doing writepage
3016         * even in laptop mode.
3017         */
3018        if (sc.priority < DEF_PRIORITY - 2)
3019            sc.may_writepage = 1;
3020
3021        /*
3022         * Now scan the zone in the dma->highmem direction, stopping
3023         * at the last zone which needs scanning.
3024         *
3025         * We do this because the page allocator works in the opposite
3026         * direction. This prevents the page allocator from allocating
3027         * pages behind kswapd's direction of progress, which would
3028         * cause too much scanning of the lower zones.
3029         */
3030        for (i = 0; i <= end_zone; i++) {
3031            struct zone *zone = pgdat->node_zones + i;
3032
3033            if (!populated_zone(zone))
3034                continue;
3035
3036            if (sc.priority != DEF_PRIORITY &&
3037                !zone_reclaimable(zone))
3038                continue;
3039
3040            sc.nr_scanned = 0;
3041
3042            nr_soft_scanned = 0;
3043            /*
3044             * Call soft limit reclaim before calling shrink_zone.
3045             */
3046            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3047                            order, sc.gfp_mask,
3048                            &nr_soft_scanned);
3049            sc.nr_reclaimed += nr_soft_reclaimed;
3050
3051            /*
3052             * There should be no need to raise the scanning
3053             * priority if enough pages are already being scanned
3054             * that that high watermark would be met at 100%
3055             * efficiency.
3056             */
3057            if (kswapd_shrink_zone(zone, end_zone, &sc,
3058                    lru_pages, &nr_attempted))
3059                raise_priority = false;
3060        }
3061
3062        /*
3063         * If the low watermark is met there is no need for processes
3064         * to be throttled on pfmemalloc_wait as they should not be
3065         * able to safely make forward progress. Wake them
3066         */
3067        if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3068                pfmemalloc_watermark_ok(pgdat))
3069            wake_up(&pgdat->pfmemalloc_wait);
3070
3071        /*
3072         * Fragmentation may mean that the system cannot be rebalanced
3073         * for high-order allocations in all zones. If twice the
3074         * allocation size has been reclaimed and the zones are still
3075         * not balanced then recheck the watermarks at order-0 to
3076         * prevent kswapd reclaiming excessively. Assume that a
3077         * process requested a high-order can direct reclaim/compact.
3078         */
3079        if (order && sc.nr_reclaimed >= 2UL << order)
3080            order = sc.order = 0;
3081
3082        /* Check if kswapd should be suspending */
3083        if (try_to_freeze() || kthread_should_stop())
3084            break;
3085
3086        /*
3087         * Compact if necessary and kswapd is reclaiming at least the
3088         * high watermark number of pages as requsted
3089         */
3090        if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
3091            compact_pgdat(pgdat, order);
3092
3093        /*
3094         * Raise priority if scanning rate is too low or there was no
3095         * progress in reclaiming pages
3096         */
3097        if (raise_priority || !sc.nr_reclaimed)
3098            sc.priority--;
3099    } while (sc.priority >= 1 &&
3100         !pgdat_balanced(pgdat, order, *classzone_idx));
3101
3102out:
3103    /*
3104     * Return the order we were reclaiming at so prepare_kswapd_sleep()
3105     * makes a decision on the order we were last reclaiming at. However,
3106     * if another caller entered the allocator slow path while kswapd
3107     * was awake, order will remain at the higher level
3108     */
3109    *classzone_idx = end_zone;
3110    return order;
3111}
3112
3113static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3114{
3115    long remaining = 0;
3116    DEFINE_WAIT(wait);
3117
3118    if (freezing(current) || kthread_should_stop())
3119        return;
3120
3121    prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3122
3123    /* Try to sleep for a short interval */
3124    if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
3125        remaining = schedule_timeout(HZ/10);
3126        finish_wait(&pgdat->kswapd_wait, &wait);
3127        prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3128    }
3129
3130    /*
3131     * After a short sleep, check if it was a premature sleep. If not, then
3132     * go fully to sleep until explicitly woken up.
3133     */
3134    if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
3135        trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3136
3137        /*
3138         * vmstat counters are not perfectly accurate and the estimated
3139         * value for counters such as NR_FREE_PAGES can deviate from the
3140         * true value by nr_online_cpus * threshold. To avoid the zone
3141         * watermarks being breached while under pressure, we reduce the
3142         * per-cpu vmstat threshold while kswapd is awake and restore
3143         * them before going back to sleep.
3144         */
3145        set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3146
3147        /*
3148         * Compaction records what page blocks it recently failed to
3149         * isolate pages from and skips them in the future scanning.
3150         * When kswapd is going to sleep, it is reasonable to assume
3151         * that pages and compaction may succeed so reset the cache.
3152         */
3153        reset_isolation_suitable(pgdat);
3154
3155        if (!kthread_should_stop())
3156            schedule();
3157
3158        set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3159    } else {
3160        if (remaining)
3161            count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3162        else
3163            count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3164    }
3165    finish_wait(&pgdat->kswapd_wait, &wait);
3166}
3167
3168/*
3169 * The background pageout daemon, started as a kernel thread
3170 * from the init process.
3171 *
3172 * This basically trickles out pages so that we have _some_
3173 * free memory available even if there is no other activity
3174 * that frees anything up. This is needed for things like routing
3175 * etc, where we otherwise might have all activity going on in
3176 * asynchronous contexts that cannot page things out.
3177 *
3178 * If there are applications that are active memory-allocators
3179 * (most normal use), this basically shouldn't matter.
3180 */
3181static int kswapd(void *p)
3182{
3183    unsigned long order, new_order;
3184    unsigned balanced_order;
3185    int classzone_idx, new_classzone_idx;
3186    int balanced_classzone_idx;
3187    pg_data_t *pgdat = (pg_data_t*)p;
3188    struct task_struct *tsk = current;
3189
3190    struct reclaim_state reclaim_state = {
3191        .reclaimed_slab = 0,
3192    };
3193    const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3194
3195    lockdep_set_current_reclaim_state(GFP_KERNEL);
3196
3197    if (!cpumask_empty(cpumask))
3198        set_cpus_allowed_ptr(tsk, cpumask);
3199    current->reclaim_state = &reclaim_state;
3200
3201    /*
3202     * Tell the memory management that we're a "memory allocator",
3203     * and that if we need more memory we should get access to it
3204     * regardless (see "__alloc_pages()"). "kswapd" should
3205     * never get caught in the normal page freeing logic.
3206     *
3207     * (Kswapd normally doesn't need memory anyway, but sometimes
3208     * you need a small amount of memory in order to be able to
3209     * page out something else, and this flag essentially protects
3210     * us from recursively trying to free more memory as we're
3211     * trying to free the first piece of memory in the first place).
3212     */
3213    tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3214    set_freezable();
3215
3216    order = new_order = 0;
3217    balanced_order = 0;
3218    classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
3219    balanced_classzone_idx = classzone_idx;
3220    for ( ; ; ) {
3221        bool ret;
3222
3223        /*
3224         * If the last balance_pgdat was unsuccessful it's unlikely a
3225         * new request of a similar or harder type will succeed soon
3226         * so consider going to sleep on the basis we reclaimed at
3227         */
3228        if (balanced_classzone_idx >= new_classzone_idx &&
3229                    balanced_order == new_order) {
3230            new_order = pgdat->kswapd_max_order;
3231            new_classzone_idx = pgdat->classzone_idx;
3232            pgdat->kswapd_max_order = 0;
3233            pgdat->classzone_idx = pgdat->nr_zones - 1;
3234        }
3235
3236        if (order < new_order || classzone_idx > new_classzone_idx) {
3237            /*
3238             * Don't sleep if someone wants a larger 'order'
3239             * allocation or has tigher zone constraints
3240             */
3241            order = new_order;
3242            classzone_idx = new_classzone_idx;
3243        } else {
3244            kswapd_try_to_sleep(pgdat, balanced_order,
3245                        balanced_classzone_idx);
3246            order = pgdat->kswapd_max_order;
3247            classzone_idx = pgdat->classzone_idx;
3248            new_order = order;
3249            new_classzone_idx = classzone_idx;
3250            pgdat->kswapd_max_order = 0;
3251            pgdat->classzone_idx = pgdat->nr_zones - 1;
3252        }
3253
3254        ret = try_to_freeze();
3255        if (kthread_should_stop())
3256            break;
3257
3258        /*
3259         * We can speed up thawing tasks if we don't call balance_pgdat
3260         * after returning from the refrigerator
3261         */
3262        if (!ret) {
3263            trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
3264            balanced_classzone_idx = classzone_idx;
3265            balanced_order = balance_pgdat(pgdat, order,
3266                        &balanced_classzone_idx);
3267        }
3268    }
3269
3270    current->reclaim_state = NULL;
3271    return 0;
3272}
3273
3274/*
3275 * A zone is low on free memory, so wake its kswapd task to service it.
3276 */
3277void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3278{
3279    pg_data_t *pgdat;
3280
3281    if (!populated_zone(zone))
3282        return;
3283
3284    if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
3285        return;
3286    pgdat = zone->zone_pgdat;
3287    if (pgdat->kswapd_max_order < order) {
3288        pgdat->kswapd_max_order = order;
3289        pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
3290    }
3291    if (!waitqueue_active(&pgdat->kswapd_wait))
3292        return;
3293    if (zone_balanced(zone, order, 0, 0))
3294        return;
3295
3296    trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
3297    wake_up_interruptible(&pgdat->kswapd_wait);
3298}
3299
3300/*
3301 * The reclaimable count would be mostly accurate.
3302 * The less reclaimable pages may be
3303 * - mlocked pages, which will be moved to unevictable list when encountered
3304 * - mapped pages, which may require several travels to be reclaimed
3305 * - dirty pages, which is not "instantly" reclaimable
3306 */
3307unsigned long global_reclaimable_pages(void)
3308{
3309    int nr;
3310
3311    nr = global_page_state(NR_ACTIVE_FILE) +
3312         global_page_state(NR_INACTIVE_FILE);
3313
3314    if (get_nr_swap_pages() > 0)
3315        nr += global_page_state(NR_ACTIVE_ANON) +
3316              global_page_state(NR_INACTIVE_ANON);
3317
3318    return nr;
3319}
3320
3321#ifdef CONFIG_HIBERNATION
3322/*
3323 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3324 * freed pages.
3325 *
3326 * Rather than trying to age LRUs the aim is to preserve the overall
3327 * LRU order by reclaiming preferentially
3328 * inactive > active > active referenced > active mapped
3329 */
3330unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3331{
3332    struct reclaim_state reclaim_state;
3333    struct scan_control sc = {
3334        .gfp_mask = GFP_HIGHUSER_MOVABLE,
3335        .may_swap = 1,
3336        .may_unmap = 1,
3337        .may_writepage = 1,
3338        .nr_to_reclaim = nr_to_reclaim,
3339        .hibernation_mode = 1,
3340        .order = 0,
3341        .priority = DEF_PRIORITY,
3342    };
3343    struct shrink_control shrink = {
3344        .gfp_mask = sc.gfp_mask,
3345    };
3346    struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3347    struct task_struct *p = current;
3348    unsigned long nr_reclaimed;
3349
3350    p->flags |= PF_MEMALLOC;
3351    lockdep_set_current_reclaim_state(sc.gfp_mask);
3352    reclaim_state.reclaimed_slab = 0;
3353    p->reclaim_state = &reclaim_state;
3354
3355    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
3356
3357    p->reclaim_state = NULL;
3358    lockdep_clear_current_reclaim_state();
3359    p->flags &= ~PF_MEMALLOC;
3360
3361    return nr_reclaimed;
3362}
3363#endif /* CONFIG_HIBERNATION */
3364
3365/* It's optimal to keep kswapds on the same CPUs as their memory, but
3366   not required for correctness. So if the last cpu in a node goes
3367   away, we get changed to run anywhere: as the first one comes back,
3368   restore their cpu bindings. */
3369static int cpu_callback(struct notifier_block *nfb, unsigned long action,
3370            void *hcpu)
3371{
3372    int nid;
3373
3374    if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
3375        for_each_node_state(nid, N_MEMORY) {
3376            pg_data_t *pgdat = NODE_DATA(nid);
3377            const struct cpumask *mask;
3378
3379            mask = cpumask_of_node(pgdat->node_id);
3380
3381            if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3382                /* One of our CPUs online: restore mask */
3383                set_cpus_allowed_ptr(pgdat->kswapd, mask);
3384        }
3385    }
3386    return NOTIFY_OK;
3387}
3388
3389/*
3390 * This kswapd start function will be called by init and node-hot-add.
3391 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
3392 */
3393int kswapd_run(int nid)
3394{
3395    pg_data_t *pgdat = NODE_DATA(nid);
3396    int ret = 0;
3397
3398    if (pgdat->kswapd)
3399        return 0;
3400
3401    pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3402    if (IS_ERR(pgdat->kswapd)) {
3403        /* failure at boot is fatal */
3404        BUG_ON(system_state == SYSTEM_BOOTING);
3405        pr_err("Failed to start kswapd on node %d\n", nid);
3406        ret = PTR_ERR(pgdat->kswapd);
3407        pgdat->kswapd = NULL;
3408    }
3409    return ret;
3410}
3411
3412/*
3413 * Called by memory hotplug when all memory in a node is offlined. Caller must
3414 * hold lock_memory_hotplug().
3415 */
3416void kswapd_stop(int nid)
3417{
3418    struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3419
3420    if (kswapd) {
3421        kthread_stop(kswapd);
3422        NODE_DATA(nid)->kswapd = NULL;
3423    }
3424}
3425
3426static int __init kswapd_init(void)
3427{
3428    int nid;
3429
3430    swap_setup();
3431    for_each_node_state(nid, N_MEMORY)
3432         kswapd_run(nid);
3433    hotcpu_notifier(cpu_callback, 0);
3434    return 0;
3435}
3436
3437module_init(kswapd_init)
3438
3439#ifdef CONFIG_NUMA
3440/*
3441 * Zone reclaim mode
3442 *
3443 * If non-zero call zone_reclaim when the number of free pages falls below
3444 * the watermarks.
3445 */
3446int zone_reclaim_mode __read_mostly;
3447
3448#define RECLAIM_OFF 0
3449#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
3450#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3451#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
3452
3453/*
3454 * Priority for ZONE_RECLAIM. This determines the fraction of pages
3455 * of a node considered for each zone_reclaim. 4 scans 1/16th of
3456 * a zone.
3457 */
3458#define ZONE_RECLAIM_PRIORITY 4
3459
3460/*
3461 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3462 * occur.
3463 */
3464int sysctl_min_unmapped_ratio = 1;
3465
3466/*
3467 * If the number of slab pages in a zone grows beyond this percentage then
3468 * slab reclaim needs to occur.
3469 */
3470int sysctl_min_slab_ratio = 5;
3471
3472static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3473{
3474    unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3475    unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3476        zone_page_state(zone, NR_ACTIVE_FILE);
3477
3478    /*
3479     * It's possible for there to be more file mapped pages than
3480     * accounted for by the pages on the file LRU lists because
3481     * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3482     */
3483    return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3484}
3485
3486/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3487static long zone_pagecache_reclaimable(struct zone *zone)
3488{
3489    long nr_pagecache_reclaimable;
3490    long delta = 0;
3491
3492    /*
3493     * If RECLAIM_SWAP is set, then all file pages are considered
3494     * potentially reclaimable. Otherwise, we have to worry about
3495     * pages like swapcache and zone_unmapped_file_pages() provides
3496     * a better estimate
3497     */
3498    if (zone_reclaim_mode & RECLAIM_SWAP)
3499        nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3500    else
3501        nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3502
3503    /* If we can't clean pages, remove dirty pages from consideration */
3504    if (!(zone_reclaim_mode & RECLAIM_WRITE))
3505        delta += zone_page_state(zone, NR_FILE_DIRTY);
3506
3507    /* Watch for any possible underflows due to delta */
3508    if (unlikely(delta > nr_pagecache_reclaimable))
3509        delta = nr_pagecache_reclaimable;
3510
3511    return nr_pagecache_reclaimable - delta;
3512}
3513
3514/*
3515 * Try to free up some pages from this zone through reclaim.
3516 */
3517static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3518{
3519    /* Minimum pages needed in order to stay on node */
3520    const unsigned long nr_pages = 1 << order;
3521    struct task_struct *p = current;
3522    struct reclaim_state reclaim_state;
3523    struct scan_control sc = {
3524        .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3525        .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
3526        .may_swap = 1,
3527        .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3528        .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
3529        .order = order,
3530        .priority = ZONE_RECLAIM_PRIORITY,
3531    };
3532    struct shrink_control shrink = {
3533        .gfp_mask = sc.gfp_mask,
3534    };
3535    unsigned long nr_slab_pages0, nr_slab_pages1;
3536
3537    cond_resched();
3538    /*
3539     * We need to be able to allocate from the reserves for RECLAIM_SWAP
3540     * and we also need to be able to write out pages for RECLAIM_WRITE
3541     * and RECLAIM_SWAP.
3542     */
3543    p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3544    lockdep_set_current_reclaim_state(gfp_mask);
3545    reclaim_state.reclaimed_slab = 0;
3546    p->reclaim_state = &reclaim_state;
3547
3548    if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3549        /*
3550         * Free memory by calling shrink zone with increasing
3551         * priorities until we have enough memory freed.
3552         */
3553        do {
3554            shrink_zone(zone, &sc);
3555        } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3556    }
3557
3558    nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3559    if (nr_slab_pages0 > zone->min_slab_pages) {
3560        /*
3561         * shrink_slab() does not currently allow us to determine how
3562         * many pages were freed in this zone. So we take the current
3563         * number of slab pages and shake the slab until it is reduced
3564         * by the same nr_pages that we used for reclaiming unmapped
3565         * pages.
3566         */
3567        nodes_clear(shrink.nodes_to_scan);
3568        node_set(zone_to_nid(zone), shrink.nodes_to_scan);
3569        for (;;) {
3570            unsigned long lru_pages = zone_reclaimable_pages(zone);
3571
3572            /* No reclaimable slab or very low memory pressure */
3573            if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3574                break;
3575
3576            /* Freed enough memory */
3577            nr_slab_pages1 = zone_page_state(zone,
3578                            NR_SLAB_RECLAIMABLE);
3579            if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3580                break;
3581        }
3582
3583        /*
3584         * Update nr_reclaimed by the number of slab pages we
3585         * reclaimed from this zone.
3586         */
3587        nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3588        if (nr_slab_pages1 < nr_slab_pages0)
3589            sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
3590    }
3591
3592    p->reclaim_state = NULL;
3593    current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3594    lockdep_clear_current_reclaim_state();
3595    return sc.nr_reclaimed >= nr_pages;
3596}
3597
3598int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3599{
3600    int node_id;
3601    int ret;
3602
3603    /*
3604     * Zone reclaim reclaims unmapped file backed pages and
3605     * slab pages if we are over the defined limits.
3606     *
3607     * A small portion of unmapped file backed pages is needed for
3608     * file I/O otherwise pages read by file I/O will be immediately
3609     * thrown out if the zone is overallocated. So we do not reclaim
3610     * if less than a specified percentage of the zone is used by
3611     * unmapped file backed pages.
3612     */
3613    if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3614        zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3615        return ZONE_RECLAIM_FULL;
3616
3617    if (!zone_reclaimable(zone))
3618        return ZONE_RECLAIM_FULL;
3619
3620    /*
3621     * Do not scan if the allocation should not be delayed.
3622     */
3623    if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3624        return ZONE_RECLAIM_NOSCAN;
3625
3626    /*
3627     * Only run zone reclaim on the local zone or on zones that do not
3628     * have associated processors. This will favor the local processor
3629     * over remote processors and spread off node memory allocations
3630     * as wide as possible.
3631     */
3632    node_id = zone_to_nid(zone);
3633    if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3634        return ZONE_RECLAIM_NOSCAN;
3635
3636    if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3637        return ZONE_RECLAIM_NOSCAN;
3638
3639    ret = __zone_reclaim(zone, gfp_mask, order);
3640    zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3641
3642    if (!ret)
3643        count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3644
3645    return ret;
3646}
3647#endif
3648
3649/*
3650 * page_evictable - test whether a page is evictable
3651 * @page: the page to test
3652 *
3653 * Test whether page is evictable--i.e., should be placed on active/inactive
3654 * lists vs unevictable list.
3655 *
3656 * Reasons page might not be evictable:
3657 * (1) page's mapping marked unevictable
3658 * (2) page is part of an mlocked VMA
3659 *
3660 */
3661int page_evictable(struct page *page)
3662{
3663    return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3664}
3665
3666#ifdef CONFIG_SHMEM
3667/**
3668 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3669 * @pages: array of pages to check
3670 * @nr_pages: number of pages to check
3671 *
3672 * Checks pages for evictability and moves them to the appropriate lru list.
3673 *
3674 * This function is only used for SysV IPC SHM_UNLOCK.
3675 */
3676void check_move_unevictable_pages(struct page **pages, int nr_pages)
3677{
3678    struct lruvec *lruvec;
3679    struct zone *zone = NULL;
3680    int pgscanned = 0;
3681    int pgrescued = 0;
3682    int i;
3683
3684    for (i = 0; i < nr_pages; i++) {
3685        struct page *page = pages[i];
3686        struct zone *pagezone;
3687
3688        pgscanned++;
3689        pagezone = page_zone(page);
3690        if (pagezone != zone) {
3691            if (zone)
3692                spin_unlock_irq(&zone->lru_lock);
3693            zone = pagezone;
3694            spin_lock_irq(&zone->lru_lock);
3695        }
3696        lruvec = mem_cgroup_page_lruvec(page, zone);
3697
3698        if (!PageLRU(page) || !PageUnevictable(page))
3699            continue;
3700
3701        if (page_evictable(page)) {
3702            enum lru_list lru = page_lru_base_type(page);
3703
3704            VM_BUG_ON(PageActive(page));
3705            ClearPageUnevictable(page);
3706            del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3707            add_page_to_lru_list(page, lruvec, lru);
3708            pgrescued++;
3709        }
3710    }
3711
3712    if (zone) {
3713        __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3714        __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3715        spin_unlock_irq(&zone->lru_lock);
3716    }
3717}
3718#endif /* CONFIG_SHMEM */
3719
3720static void warn_scan_unevictable_pages(void)
3721{
3722    printk_once(KERN_WARNING
3723            "%s: The scan_unevictable_pages sysctl/node-interface has been "
3724            "disabled for lack of a legitimate use case. If you have "
3725            "one, please send an email to linux-mm@kvack.org.\n",
3726            current->comm);
3727}
3728
3729/*
3730 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3731 * all nodes' unevictable lists for evictable pages
3732 */
3733unsigned long scan_unevictable_pages;
3734
3735int scan_unevictable_handler(struct ctl_table *table, int write,
3736               void __user *buffer,
3737               size_t *length, loff_t *ppos)
3738{
3739    warn_scan_unevictable_pages();
3740    proc_doulongvec_minmax(table, write, buffer, length, ppos);
3741    scan_unevictable_pages = 0;
3742    return 0;
3743}
3744
3745#ifdef CONFIG_NUMA
3746/*
3747 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3748 * a specified node's per zone unevictable lists for evictable pages.
3749 */
3750
3751static ssize_t read_scan_unevictable_node(struct device *dev,
3752                      struct device_attribute *attr,
3753                      char *buf)
3754{
3755    warn_scan_unevictable_pages();
3756    return sprintf(buf, "0\n"); /* always zero; should fit... */
3757}
3758
3759static ssize_t write_scan_unevictable_node(struct device *dev,
3760                       struct device_attribute *attr,
3761                    const char *buf, size_t count)
3762{
3763    warn_scan_unevictable_pages();
3764    return 1;
3765}
3766
3767
3768static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3769            read_scan_unevictable_node,
3770            write_scan_unevictable_node);
3771
3772int scan_unevictable_register_node(struct node *node)
3773{
3774    return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
3775}
3776
3777void scan_unevictable_unregister_node(struct node *node)
3778{
3779    device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
3780}
3781#endif
3782

Archive Download this file



interactive