mm/swap_state.c

/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
    .writepage = swap_writepage,
    .set_page_dirty = swap_set_page_dirty,
    .migratepage = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
    .name = "swap",
    .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_space = {
    .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
    .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
    .a_ops = &swap_aops,
    .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
    .backing_dev_info = &swap_backing_dev_info,
};
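/*
 * Note: every swap cache page lives in this single address_space. Pages
 * are keyed in the radix tree by their raw swp_entry_t value, and
 * page_private() holds that value in place of the usual ->mapping/->index
 * pair (see __add_to_swap_cache() below).
 */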

#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)

static struct {
    unsigned long add_total;
    unsigned long del_total;
    unsigned long find_success;
    unsigned long find_total;
} swap_cache_info;
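/*
 * INC_CACHE_INFO() is a plain, non-atomic increment; find_success and
 * find_total in particular are bumped outside any lock, so the figures
 * reported by show_swap_cache_info() are best-effort approximations.
 */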

void show_swap_cache_info(void)
{
    printk("%lu pages in swap cache\n", total_swapcache_pages);
    printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
        swap_cache_info.add_total, swap_cache_info.del_total,
        swap_cache_info.find_success, swap_cache_info.find_total);
    printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
    printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
    int error;

    VM_BUG_ON(!PageLocked(page));
    VM_BUG_ON(PageSwapCache(page));
    VM_BUG_ON(!PageSwapBacked(page));

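    /*
     * The reference taken here is owned by the swap cache once the
     * radix tree insert succeeds; it is dropped again either in the
     * error path below or when the page leaves the cache via
     * delete_from_swap_cache().
     */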
    page_cache_get(page);
    SetPageSwapCache(page);
    set_page_private(page, entry.val);

    spin_lock_irq(&swapper_space.tree_lock);
    error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
    if (likely(!error)) {
        total_swapcache_pages++;
        __inc_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(add_total);
    }
    spin_unlock_irq(&swapper_space.tree_lock);

    if (unlikely(error)) {
        /*
         * Only the context which has set the SWAP_HAS_CACHE flag
         * would call add_to_swap_cache(), so add_to_swap_cache()
         * doesn't return -EEXIST.
         */
        VM_BUG_ON(error == -EEXIST);
        set_page_private(page, 0UL);
        ClearPageSwapCache(page);
        page_cache_release(page);
    }

    return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
    int error;

    error = radix_tree_preload(gfp_mask);
    if (!error) {
        error = __add_to_swap_cache(page, entry);
        radix_tree_preload_end();
    }
    return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
    VM_BUG_ON(!PageLocked(page));
    VM_BUG_ON(!PageSwapCache(page));
    VM_BUG_ON(PageWriteback(page));

    radix_tree_delete(&swapper_space.page_tree, page_private(page));
    set_page_private(page, 0);
    ClearPageSwapCache(page);
    total_swapcache_pages--;
    __dec_zone_page_state(page, NR_FILE_PAGES);
    INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache. Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
    swp_entry_t entry;
    int err;

    VM_BUG_ON(!PageLocked(page));
    VM_BUG_ON(!PageUptodate(page));

    entry = get_swap_page();
    if (!entry.val)
        return 0;

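    /*
     * Swap I/O works on base pages only, so a transparent huge page
     * must be split before its subpages can enter the swap cache; if
     * the split fails, hand the just-allocated swap slot back.
     */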
    if (unlikely(PageTransHuge(page)))
        if (unlikely(split_huge_page(page))) {
            swapcache_free(entry, NULL);
            return 0;
        }

    /*
     * Radix-tree node allocations from PF_MEMALLOC contexts could
     * completely exhaust the page allocator. __GFP_NOMEMALLOC
     * stops emergency reserves from being allocated.
     *
     * TODO: this could cause a theoretical memory reclaim
     * deadlock in the swap out path.
     */
    /*
     * Add it to the swap cache and mark it dirty
     */
    err = add_to_swap_cache(page, entry,
            __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

    if (!err) { /* Success */
        SetPageDirty(page);
        return 1;
    } else { /* -ENOMEM radix-tree allocation failure */
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(entry, NULL);
        return 0;
    }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
    swp_entry_t entry;

    entry.val = page_private(page);

    spin_lock_irq(&swapper_space.tree_lock);
    __delete_from_swap_cache(page);
    spin_unlock_irq(&swapper_space.tree_lock);

    swapcache_free(entry, page);
    page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck inside
 * try_to_free_swap() _with_ the lock.
 * - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
    if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
        try_to_free_swap(page);
        unlock_page(page);
    }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
    free_swap_cache(page);
    page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them. They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
    struct page **pagep = pages;

    lru_add_drain();
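    /*
     * Work through the array in PAGEVEC_SIZE batches so that each
     * release_pages() call operates on a bounded chunk.
     */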
    while (nr) {
        int todo = min(nr, PAGEVEC_SIZE);
        int i;

        for (i = 0; i < todo; i++)
            free_swap_cache(pagep[i]);
        release_pages(pagep, todo, 0);
        pagep += todo;
        nr -= todo;
    }
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
    struct page *page;

    page = find_get_page(&swapper_space, entry.val);

    if (page)
        INC_CACHE_INFO(find_success);

    INC_CACHE_INFO(find_total);
    return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
            struct vm_area_struct *vma, unsigned long addr)
{
    struct page *found_page, *new_page = NULL;
    int err;

    do {
        /*
         * First check the swap cache. Since this is normally
         * called after lookup_swap_cache() failed, re-calling
         * that would confuse statistics.
         */
        found_page = find_get_page(&swapper_space, entry.val);
        if (found_page)
            break;

        /*
         * Get a new page to read into from swap.
         */
        if (!new_page) {
            new_page = alloc_page_vma(gfp_mask, vma, addr);
            if (!new_page)
                break; /* Out of memory */
        }

        /*
         * call radix_tree_preload() while we can wait.
         */
        err = radix_tree_preload(gfp_mask & GFP_KERNEL);
        if (err)
            break;

        /*
         * Swap entry may have been freed since our caller observed it.
         */
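        /*
         * swapcache_prepare() claims the entry by setting
         * SWAP_HAS_CACHE: -EEXIST means another task is already
         * installing this entry in the swap cache, so we retry the
         * lookup from the top; any other error means the entry is
         * no longer in use.
         */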
        err = swapcache_prepare(entry);
        if (err == -EEXIST) { /* seems racy */
            radix_tree_preload_end();
            continue;
        }
        if (err) { /* swp entry is obsolete ? */
            radix_tree_preload_end();
            break;
        }

        /* May fail (-ENOMEM) if radix-tree node allocation failed. */
        __set_page_locked(new_page);
        SetPageSwapBacked(new_page);
        err = __add_to_swap_cache(new_page, entry);
        if (likely(!err)) {
            radix_tree_preload_end();
            /*
             * Initiate read into locked page and return.
             */
            lru_cache_add_anon(new_page);
            swap_readpage(new_page);
            return new_page;
        }
        radix_tree_preload_end();
        ClearPageSwapBacked(new_page);
        __clear_page_locked(new_page);
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(entry, NULL);
    } while (err != -ENOMEM);

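    /*
     * We get here on a cache hit, on allocation failure, or once the
     * swap entry has gone away; drop the page we allocated but never
     * used, if any.
     */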
    if (new_page)
        page_cache_release(new_page);
    return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
            struct vm_area_struct *vma, unsigned long addr)
{
    struct page *page;
    unsigned long offset = swp_offset(entry);
    unsigned long start_offset, end_offset;
    unsigned long mask = (1UL << page_cluster) - 1;
    struct blk_plug plug;

    /* Read a page_cluster sized and aligned cluster around offset. */
    start_offset = offset & ~mask;
    end_offset = offset | mask;
    if (!start_offset) /* First page is swap header. */
        start_offset++;

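    /*
     * Example: with page_cluster == 3, mask == 7, so a fault at swap
     * offset 13 reads offsets 8..15 - eight pages, including the one
     * actually requested.
     */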
    blk_start_plug(&plug);
    for (offset = start_offset; offset <= end_offset; offset++) {
        /* Ok, do the async read-ahead now */
        page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr);
        if (!page)
            continue;
        page_cache_release(page);
    }
    blk_finish_plug(&plug);

    lru_add_drain(); /* Push any new pages onto the LRU now */
    return read_swap_cache_async(entry, gfp_mask, vma, addr);
}