mm/swap.c

/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
    if (PageLRU(page)) {
        struct zone *zone = page_zone(page);
        struct lruvec *lruvec;
        unsigned long flags;

        spin_lock_irqsave(&zone->lru_lock, flags);
        lruvec = mem_cgroup_page_lruvec(page, zone);
        VM_BUG_ON(!PageLRU(page));
        __ClearPageLRU(page);
        del_page_from_lru_list(page, lruvec, page_off_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
    }
}

static void __put_single_page(struct page *page)
{
    __page_cache_release(page);
    free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
    compound_page_dtor *dtor;

    __page_cache_release(page);
    dtor = get_compound_page_dtor(page);
    (*dtor)(page);
}

static void put_compound_page(struct page *page)
{
    if (unlikely(PageTail(page))) {
        /* __split_huge_page_refcount can run under us */
        struct page *page_head = compound_trans_head(page);

        if (likely(page != page_head &&
               get_page_unless_zero(page_head))) {
            unsigned long flags;

            /*
             * THP can not break up slab pages so avoid taking
             * compound_lock(). Slab performs non-atomic bit ops
             * on page->flags for better performance. In particular
             * slab_unlock() in slub used to be a hot path. It is
             * still hot on arches that do not support
             * this_cpu_cmpxchg_double().
             */
            if (PageSlab(page_head)) {
                if (PageTail(page)) {
                    if (put_page_testzero(page_head))
                        VM_BUG_ON(1);

                    atomic_dec(&page->_mapcount);
                    goto skip_lock_tail;
                } else
                    goto skip_lock;
            }
            /*
             * page_head wasn't a dangling pointer but it
             * may not be a head page anymore by the time
             * we obtain the lock. That is ok as long as it
             * can't be freed from under us.
             */
            flags = compound_lock_irqsave(page_head);
            if (unlikely(!PageTail(page))) {
                /* __split_huge_page_refcount run before us */
                compound_unlock_irqrestore(page_head, flags);
skip_lock:
                if (put_page_testzero(page_head))
                    __put_single_page(page_head);
out_put_single:
                if (put_page_testzero(page))
                    __put_single_page(page);
                return;
            }
            VM_BUG_ON(page_head != page->first_page);
            /*
             * We can release the refcount taken by
             * get_page_unless_zero() now that
             * __split_huge_page_refcount() is blocked on
             * the compound_lock.
             */
            if (put_page_testzero(page_head))
                VM_BUG_ON(1);
            /* __split_huge_page_refcount will wait now */
            VM_BUG_ON(page_mapcount(page) <= 0);
            atomic_dec(&page->_mapcount);
            VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
            VM_BUG_ON(atomic_read(&page->_count) != 0);
            compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
            if (put_page_testzero(page_head)) {
                if (PageHead(page_head))
                    __put_compound_page(page_head);
                else
                    __put_single_page(page_head);
            }
        } else {
            /* page_head is a dangling pointer */
            VM_BUG_ON(PageTail(page));
            goto out_put_single;
        }
    } else if (put_page_testzero(page)) {
        if (PageHead(page))
            __put_compound_page(page);
        else
            __put_single_page(page);
    }
}

void put_page(struct page *page)
{
    if (unlikely(PageCompound(page)))
        put_compound_page(page);
    else if (put_page_testzero(page))
        __put_single_page(page);
}
EXPORT_SYMBOL(put_page);
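
/*
 * Illustrative sketch of the usual get_page()/put_page() pairing: a
 * caller that keeps a page across an operation takes its own reference
 * first and drops it when done; the final put_page() is what ends up in
 * __put_single_page() or put_compound_page() above. The helper name is
 * hypothetical.
 */
static void example_use_page_briefly(struct page *page)
{
    get_page(page);     /* pin the page across the operation */
    /* ... inspect or copy the page contents here ... */
    put_page(page);     /* drop the pin; this may free the page */
}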

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
    /*
     * This takes care of get_page() if run on a tail page
     * returned by one of the get_user_pages/follow_page variants.
     * get_user_pages/follow_page itself doesn't need the compound
     * lock because it runs __get_page_tail_foll() under the
     * proper PT lock that already serializes against
     * split_huge_page().
     */
    unsigned long flags;
    bool got = false;
    struct page *page_head = compound_trans_head(page);

    if (likely(page != page_head && get_page_unless_zero(page_head))) {

        /* See the comment in put_compound_page(). */
        if (PageSlab(page_head)) {
            if (likely(PageTail(page))) {
                __get_page_tail_foll(page, false);
                return true;
            } else {
                put_page(page_head);
                return false;
            }
        }

        /*
         * page_head wasn't a dangling pointer but it
         * may not be a head page anymore by the time
         * we obtain the lock. That is ok as long as it
         * can't be freed from under us.
         */
        flags = compound_lock_irqsave(page_head);
        /* here __split_huge_page_refcount won't run anymore */
        if (likely(PageTail(page))) {
            __get_page_tail_foll(page, false);
            got = true;
        }
        compound_unlock_irqrestore(page_head, flags);
        if (unlikely(!got))
            put_page(page_head);
    }
    return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru. Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
    while (!list_empty(pages)) {
        struct page *victim;

        victim = list_entry(pages->prev, struct page, lru);
        list_del(&victim->lru);
        page_cache_release(victim);
    }
}
EXPORT_SYMBOL(put_pages_list);
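
/*
 * Illustrative sketch of a put_pages_list() caller: pages are threaded
 * on page->lru and the whole batch is released with one call. The
 * helper name is hypothetical.
 */
static void example_release_page_batch(void)
{
    LIST_HEAD(pages);
    int i;

    for (i = 0; i < 4; i++) {
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
            break;
        list_add(&page->lru, &pages);
    }
    put_pages_list(&pages);     /* drops one reference on each page */
}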

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov: An array of struct kvec structures
 * @nr_segs: number of segments to pin
 * @write: pinning for read/write, currently ignored
 * @pages: array that receives pointers to the pages pinned.
 * Should be at least nr_segs long.
 *
 * Returns the number of pages pinned, which may be fewer than the number
 * requested (the walk stops at the first segment that is not exactly
 * PAGE_SIZE long). If nr_segs is 0 or negative, returns 0. Each page
 * returned must be released with a put_page() call when the caller is
 * finished with it.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
        struct page **pages)
{
    int seg;

    for (seg = 0; seg < nr_segs; seg++) {
        if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
            return seg;

        pages[seg] = kmap_to_page(kiov[seg].iov_base);
        page_cache_get(pages[seg]);
    }

    return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start: starting kernel address
 * @write: pinning for read/write, currently ignored
 * @pages: array that receives pointer to the page pinned.
 * Must have room for exactly one page pointer.
 *
 * Returns 1 if the page is pinned, 0 otherwise. The page returned must be
 * released with a put_page() call when the caller is finished with it.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
    const struct kvec kiov = {
        .iov_base = (void *)start,
        .iov_len = PAGE_SIZE
    };

    return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
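
/*
 * Illustrative sketch of a get_kernel_page() caller: pin the page
 * backing one page-aligned, page-sized kernel buffer so it can be
 * handed to code that expects a pinned struct page. The helper name is
 * hypothetical.
 */
static int example_pin_kernel_buffer(unsigned long buf)
{
    struct page *page;

    if (get_kernel_page(buf, 0, &page) != 1)
        return -EFAULT;
    /* ... pass the page to code expecting a pinned struct page ... */
    put_page(page);     /* undo the reference taken by the pin */
    return 0;
}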

static void pagevec_lru_move_fn(struct pagevec *pvec,
    void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
    void *arg)
{
    int i;
    struct zone *zone = NULL;
    struct lruvec *lruvec;
    unsigned long flags = 0;

    for (i = 0; i < pagevec_count(pvec); i++) {
        struct page *page = pvec->pages[i];
        struct zone *pagezone = page_zone(page);

        if (pagezone != zone) {
            if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
            zone = pagezone;
            spin_lock_irqsave(&zone->lru_lock, flags);
        }

        lruvec = mem_cgroup_page_lruvec(page, zone);
        (*move_fn)(page, lruvec, arg);
    }
    if (zone)
        spin_unlock_irqrestore(&zone->lru_lock, flags);
    release_pages(pvec->pages, pvec->nr, pvec->cold);
    pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                 void *arg)
{
    int *pgmoved = arg;

    if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
        enum lru_list lru = page_lru_base_type(page);
        list_move_tail(&page->lru, &lruvec->lists[lru]);
        (*pgmoved)++;
    }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
    int pgmoved = 0;

    pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
    __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
    if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
        !PageUnevictable(page) && PageLRU(page)) {
        struct pagevec *pvec;
        unsigned long flags;

        page_cache_get(page);
        local_irq_save(flags);
        pvec = &__get_cpu_var(lru_rotate_pvecs);
        if (!pagevec_add(pvec, page))
            pagevec_move_tail(pvec);
        local_irq_restore(flags);
    }
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
                     int file, int rotated)
{
    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

    reclaim_stat->recent_scanned[file]++;
    if (rotated)
        reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
                void *arg)
{
    if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
        int file = page_is_file_cache(page);
        int lru = page_lru_base_type(page);

        del_page_from_lru_list(page, lruvec, lru);
        SetPageActive(page);
        lru += LRU_ACTIVE;
        add_page_to_lru_list(page, lruvec, lru);

        __count_vm_event(PGACTIVATE);
        update_page_reclaim_stat(lruvec, file, 1);
    }
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
    struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

    if (pagevec_count(pvec))
        pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
    if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
        struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
            pagevec_lru_move_fn(pvec, __activate_page, NULL);
        put_cpu_var(activate_page_pvecs);
    }
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
    struct zone *zone = page_zone(page);

    spin_lock_irq(&zone->lru_lock);
    __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
    spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced -> inactive,referenced
 * inactive,referenced -> active,unreferenced
 * active,unreferenced -> active,referenced
 */
void mark_page_accessed(struct page *page)
{
    if (!PageActive(page) && !PageUnevictable(page) &&
            PageReferenced(page) && PageLRU(page)) {
        activate_page(page);
        ClearPageReferenced(page);
    } else if (!PageReferenced(page)) {
        SetPageReferenced(page);
    }
}
EXPORT_SYMBOL(mark_page_accessed);
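
/*
 * Illustrative sketch of how the transitions above play out: two
 * mark_page_accessed() calls on a page sitting on the inactive list
 * first set PG_referenced and then promote the page to the active
 * list. The helper name is hypothetical.
 */
static void example_note_two_accesses(struct page *page)
{
    mark_page_accessed(page);   /* inactive,unreferenced -> inactive,referenced */
    mark_page_accessed(page);   /* inactive,referenced   -> active,unreferenced */
}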

void __lru_cache_add(struct page *page, enum lru_list lru)
{
    struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

    page_cache_get(page);
    if (!pagevec_add(pvec, page))
        __pagevec_lru_add(pvec, lru);
    put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
    if (PageActive(page)) {
        VM_BUG_ON(PageUnevictable(page));
        ClearPageActive(page);
    } else if (PageUnevictable(page)) {
        VM_BUG_ON(PageActive(page));
        ClearPageUnevictable(page);
    }

    VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
    __lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list. To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks. This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
    struct zone *zone = page_zone(page);
    struct lruvec *lruvec;

    spin_lock_irq(&zone->lru_lock);
    lruvec = mem_cgroup_page_lruvec(page, zone);
    SetPageUnevictable(page);
    SetPageLRU(page);
    add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
    spin_unlock_irq(&zone->lru_lock);
}
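
/*
 * Illustrative sketch of an add_page_to_unevictable_list() caller: the
 * page is kept locked so it cannot be made evictable (e.g. by munlock)
 * while it is being placed on the list, and it must not already be on
 * an LRU list. The caller is assumed to hold a reference on the page.
 * The helper name is hypothetical.
 */
static void example_add_locked_unevictable(struct page *page)
{
    lock_page(page);
    if (!PageLRU(page))
        add_page_to_unevictable_list(page);
    unlock_page(page);
}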

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim. It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page is not mapped but is dirty or under writeback, it can be
 * reclaimed as soon as possible by marking it with PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list (rather
 * than the tail) because the VM expects the flusher threads to write it
 * out, which is much more effective than the single-page writeout done
 * by reclaim.
 */
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
                  void *arg)
{
    int lru, file;
    bool active;

    if (!PageLRU(page))
        return;

    if (PageUnevictable(page))
        return;

    /* Some processes are using the page */
    if (page_mapped(page))
        return;

    active = PageActive(page);
    file = page_is_file_cache(page);
    lru = page_lru_base_type(page);

    del_page_from_lru_list(page, lruvec, lru + active);
    ClearPageActive(page);
    ClearPageReferenced(page);
    add_page_to_lru_list(page, lruvec, lru);

    if (PageWriteback(page) || PageDirty(page)) {
        /*
         * Setting PG_reclaim can race with end_page_writeback,
         * which can confuse readahead. But the race window is
         * really small and the problem is non-critical.
         */
        SetPageReclaim(page);
    } else {
        /*
         * The page's writeback ended while it sat in the pagevec,
         * so move the page to the tail of the inactive list.
         */
        list_move_tail(&page->lru, &lruvec->lists[lru]);
        __count_vm_event(PGROTATED);
    }

    if (active)
        __count_vm_event(PGDEACTIVATE);
    update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
    struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
    struct pagevec *pvec;
    int lru;

    for_each_lru(lru) {
        pvec = &pvecs[lru - LRU_BASE];
        if (pagevec_count(pvec))
            __pagevec_lru_add(pvec, lru);
    }

    pvec = &per_cpu(lru_rotate_pvecs, cpu);
    if (pagevec_count(pvec)) {
        unsigned long flags;

        /* No harm done if a racing interrupt already did this */
        local_irq_save(flags);
        pagevec_move_tail(pvec);
        local_irq_restore(flags);
    }

    pvec = &per_cpu(lru_deactivate_pvecs, cpu);
    if (pagevec_count(pvec))
        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

    activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
    /*
     * In a workload with many unevictable pages (e.g. one that uses
     * mlock heavily), deactivating unevictable pages to accelerate
     * reclaim is pointless.
     */
    if (PageUnevictable(page))
        return;

    if (likely(get_page_unless_zero(page))) {
        struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

        if (!pagevec_add(pvec, page))
            pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
        put_cpu_var(lru_deactivate_pvecs);
    }
}
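
/*
 * Illustrative sketch of a deactivate_page() caller, mirroring the
 * invalidation path: if a page cannot be invalidated (for instance
 * because it is dirty or under writeback), hint that it is a good
 * reclaim candidate. The helper name is hypothetical.
 */
static void example_hint_reclaim(struct page *page)
{
    int invalidated = 0;

    if (trylock_page(page)) {
        invalidated = invalidate_inode_page(page);
        unlock_page(page);
    }
    if (!invalidated)
        deactivate_page(page);  /* could not drop it: queue for deactivation */
}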

void lru_add_drain(void)
{
    lru_add_drain_cpu(get_cpu());
    put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
    lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
    return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
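
/*
 * Illustrative sketch of an lru_add_drain_all() caller: before scanning
 * the LRU lists for a batch operation, flush every CPU's pagevecs so
 * that recently added pages are actually visible on the lists. The
 * helper name is hypothetical.
 */
static void example_flush_pagevecs_before_scan(void)
{
    lru_add_drain_all();    /* may sleep: schedules work on each CPU */
    /* ... walk the zone LRU lists under zone->lru_lock ... */
}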

/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
    int i;
    LIST_HEAD(pages_to_free);
    struct zone *zone = NULL;
    struct lruvec *lruvec;
    unsigned long uninitialized_var(flags);

    for (i = 0; i < nr; i++) {
        struct page *page = pages[i];

        if (unlikely(PageCompound(page))) {
            if (zone) {
                spin_unlock_irqrestore(&zone->lru_lock, flags);
                zone = NULL;
            }
            put_compound_page(page);
            continue;
        }

        if (!put_page_testzero(page))
            continue;

        if (PageLRU(page)) {
            struct zone *pagezone = page_zone(page);

            if (pagezone != zone) {
                if (zone)
                    spin_unlock_irqrestore(&zone->lru_lock,
                                    flags);
                zone = pagezone;
                spin_lock_irqsave(&zone->lru_lock, flags);
            }

            lruvec = mem_cgroup_page_lruvec(page, zone);
            VM_BUG_ON(!PageLRU(page));
            __ClearPageLRU(page);
            del_page_from_lru_list(page, lruvec, page_off_lru(page));
        }

        list_add(&page->lru, &pages_to_free);
    }
    if (zone)
        spin_unlock_irqrestore(&zone->lru_lock, flags);

    free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
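
/*
 * Illustrative sketch of a release_pages() caller: drop the references
 * on an array of pages in one batched call instead of looping over
 * page_cache_release(). The helper name is hypothetical.
 */
static void example_drop_page_array(struct page **pages, int nr)
{
    release_pages(pages, nr, 0);    /* 0: treat the pages as cache-hot */
}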

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues. That would prevent them from really being freed right now. That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
    lru_add_drain();
    release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
    pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
               struct lruvec *lruvec)
{
    int uninitialized_var(active);
    enum lru_list lru;
    const int file = 0;

    VM_BUG_ON(!PageHead(page));
    VM_BUG_ON(PageCompound(page_tail));
    VM_BUG_ON(PageLRU(page_tail));
    VM_BUG_ON(NR_CPUS != 1 &&
          !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

    SetPageLRU(page_tail);

    if (page_evictable(page_tail, NULL)) {
        if (PageActive(page)) {
            SetPageActive(page_tail);
            active = 1;
            lru = LRU_ACTIVE_ANON;
        } else {
            active = 0;
            lru = LRU_INACTIVE_ANON;
        }
    } else {
        SetPageUnevictable(page_tail);
        lru = LRU_UNEVICTABLE;
    }

    if (likely(PageLRU(page)))
        list_add_tail(&page_tail->lru, &page->lru);
    else {
        struct list_head *list_head;
        /*
         * Head page has not yet been counted, as an hpage,
         * so we must account for each subpage individually.
         *
         * Use the standard add function to put page_tail on the list,
         * but then correct its position so they all end up in order.
         */
        add_page_to_lru_list(page_tail, lruvec, lru);
        list_head = page_tail->lru.prev;
        list_move_tail(&page_tail->lru, list_head);
    }

    if (!PageUnevictable(page))
        update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                 void *arg)
{
    enum lru_list lru = (enum lru_list)arg;
    int file = is_file_lru(lru);
    int active = is_active_lru(lru);

    VM_BUG_ON(PageActive(page));
    VM_BUG_ON(PageUnevictable(page));
    VM_BUG_ON(PageLRU(page));

    SetPageLRU(page);
    if (active)
        SetPageActive(page);
    add_page_to_lru_list(page, lruvec, lru);
    update_page_reclaim_stat(lruvec, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them. Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
    VM_BUG_ON(is_unevictable_lru(lru));

    pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
        pgoff_t start, unsigned nr_pages)
{
    pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
    return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
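
/*
 * Illustrative sketch of the usual pagevec_lookup() loop: walk every
 * page currently in a mapping in index order, one pagevec-sized batch
 * at a time, dropping the gang-lookup references with
 * pagevec_release(). The helper name is hypothetical.
 */
static void example_walk_mapping(struct address_space *mapping)
{
    struct pagevec pvec;
    pgoff_t index = 0;

    pagevec_init(&pvec, 0);
    while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
        int i;

        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            index = page->index + 1;    /* resume after this page */
            /* ... examine the page; lock it if its state matters ... */
        }
        pagevec_release(&pvec);
        cond_resched();
    }
}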

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
        pgoff_t *index, int tag, unsigned nr_pages)
{
    pvec->nr = find_get_pages_tag(mapping, index, tag,
                    nr_pages, pvec->pages);
    return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
    unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
    bdi_init(swapper_space.backing_dev_info);
#endif

    /* Use a smaller cluster for small-memory machines */
    if (megs < 16)
        page_cluster = 2;
    else
        page_cluster = 3;
    /*
     * Right now other parts of the system mean that we
     * _really_ don't want to cluster much more.
     */
}
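
/*
 * Illustrative sketch of how page_cluster is consumed elsewhere in the
 * VM: swap readahead brings pages in around a fault in groups of up to
 * 1 << page_cluster (8 pages with the default of 3, 4 on small-memory
 * machines). The helper name is hypothetical.
 */
static unsigned long example_swap_readahead_window(void)
{
    return 1UL << page_cluster;     /* pages read around a swap fault */
}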