mm/swapfile.c

/*
 * linux/mm/swapfile.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/page_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
                 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);

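/*
 * swap_lock serializes updates to the swap areas and the globals below:
 * the swap_info[] table, swap_list, and the nr_swap_pages counters.
 */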
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
static int least_priority;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

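/*
 * Priority-ordered list of swap areas: .head is the highest-priority type,
 * .next is the rotor used by get_swap_page(); -1 means none.
 */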
static struct swap_list_t swap_list = {-1, -1};

static struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static inline unsigned char swap_count(unsigned char ent)
{
    return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}

/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
    swp_entry_t entry = swp_entry(si->type, offset);
    struct page *page;
    int ret = 0;

    page = find_get_page(&swapper_space, entry.val);
    if (!page)
        return 0;
    /*
     * This function is called from scan_swap_map(), which is also
     * called by vmscan.c while reclaiming pages, so a page lock may
     * already be held here. We have to use trylock to avoid deadlock.
     * This is a special case; in usual operations use
     * try_to_free_swap() with an explicit lock_page() instead.
     */
    if (trylock_page(page)) {
        ret = try_to_free_swap(page);
        unlock_page(page);
    }
    page_cache_release(page);
    return ret;
}

/*
 * We need this because the bdev->unplug_fn can sleep and we cannot
 * hold swap_lock while calling the unplug_fn. And swap_lock
 * cannot be turned into a mutex.
 */
static DECLARE_RWSEM(swap_unplug_sem);

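/*
 * Kick the block device backing this swap page. Holding swap_unplug_sem
 * for read guards against the swap area disappearing under us: swapoff
 * takes it for write before tearing the area down.
 */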
void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
    swp_entry_t entry;

    down_read(&swap_unplug_sem);
    entry.val = page_private(page);
    if (PageSwapCache(page)) {
        struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
        struct backing_dev_info *bdi;

        /*
         * If the page is removed from swapcache from under us (with a
         * racy try_to_unuse/swapoff) we need an additional reference
         * count to avoid reading garbage from page_private(page) above.
         * If the WARN_ON triggers during a swapoff it may be the race
         * condition and it's harmless. However if it triggers without
         * swapoff it signals a problem.
         */
        WARN_ON(page_count(page) <= 1);

        bdi = bdev->bd_inode->i_mapping->backing_dev_info;
        blk_run_backing_dev(bdi, page);
    }
    up_read(&swap_unplug_sem);
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
    struct swap_extent *se;
    sector_t start_block;
    sector_t nr_blocks;
    int err = 0;

    /* Do not discard the swap header page! */
    se = &si->first_swap_extent;
    start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
    nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
    if (nr_blocks) {
        err = blkdev_issue_discard(si->bdev, start_block,
                nr_blocks, GFP_KERNEL,
                BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
        if (err)
            return err;
        cond_resched();
    }

    list_for_each_entry(se, &si->first_swap_extent.list, list) {
        start_block = se->start_block << (PAGE_SHIFT - 9);
        nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

        err = blkdev_issue_discard(si->bdev, start_block,
                nr_blocks, GFP_KERNEL,
                BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
        if (err)
            break;

        cond_resched();
    }
    return err; /* That will often be -EOPNOTSUPP */
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
                 pgoff_t start_page, pgoff_t nr_pages)
{
    struct swap_extent *se = si->curr_swap_extent;
    int found_extent = 0;

    while (nr_pages) {
        struct list_head *lh;

        if (se->start_page <= start_page &&
            start_page < se->start_page + se->nr_pages) {
            pgoff_t offset = start_page - se->start_page;
            sector_t start_block = se->start_block + offset;
            sector_t nr_blocks = se->nr_pages - offset;

            if (nr_blocks > nr_pages)
                nr_blocks = nr_pages;
            start_page += nr_blocks;
            nr_pages -= nr_blocks;

            if (!found_extent++)
                si->curr_swap_extent = se;

            start_block <<= PAGE_SHIFT - 9;
            nr_blocks <<= PAGE_SHIFT - 9;
            if (blkdev_issue_discard(si->bdev, start_block,
                    nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
                            BLKDEV_IFL_BARRIER))
                break;
        }

        lh = se->list.next;
        se = list_entry(lh, struct swap_extent, list);
    }
}

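/*
 * Bit-wait callback used with wait_on_bit() in scan_swap_map(): just give
 * up the CPU until the SWP_DISCARDING bit is cleared.
 */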
static int wait_for_discard(void *word)
{
    schedule();
    return 0;
}

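/*
 * SWAPFILE_CLUSTER is the number of slots allocated sequentially before
 * looking for a fresh free cluster; LATENCY_LIMIT is the number of map
 * entries scanned between cond_resched() calls.
 */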
#define SWAPFILE_CLUSTER 256
#define LATENCY_LIMIT 256

static inline unsigned long scan_swap_map(struct swap_info_struct *si,
                      unsigned char usage)
{
    unsigned long offset;
    unsigned long scan_base;
    unsigned long last_in_cluster = 0;
    int latency_ration = LATENCY_LIMIT;
    int found_free_cluster = 0;

    /*
     * We try to cluster swap pages by allocating them sequentially
     * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
     * way, however, we resort to first-free allocation, starting
     * a new cluster. This prevents us from scattering swap pages
     * all over the entire swap partition, so that we reduce
     * overall disk seek times between swap pages. -- sct
     * But we do now try to find an empty cluster. -Andrea
     * And we let swap pages go all over an SSD partition. Hugh
     */

    si->flags += SWP_SCANNING;
    scan_base = offset = si->cluster_next;

    if (unlikely(!si->cluster_nr--)) {
        if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
            si->cluster_nr = SWAPFILE_CLUSTER - 1;
            goto checks;
        }
        if (si->flags & SWP_DISCARDABLE) {
            /*
             * Start range check on racing allocations, in case
             * they overlap the cluster we eventually decide on
             * (we scan without swap_lock to allow preemption).
             * It's hardly conceivable that cluster_nr could be
             * wrapped during our scan, but don't depend on it.
             */
            if (si->lowest_alloc)
                goto checks;
            si->lowest_alloc = si->max;
            si->highest_alloc = 0;
        }
        spin_unlock(&swap_lock);

        /*
         * If seek is expensive, start searching for new cluster from
         * start of partition, to minimize the span of allocated swap.
         * But if seek is cheap, search from our current position, so
         * that swap is allocated from all over the partition: if the
         * Flash Translation Layer only remaps within limited zones,
         * we don't want to wear out the first zone too quickly.
         */
        if (!(si->flags & SWP_SOLIDSTATE))
            scan_base = offset = si->lowest_bit;
        last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

        /* Locate the first empty (unaligned) cluster */
        for (; last_in_cluster <= si->highest_bit; offset++) {
            if (si->swap_map[offset])
                last_in_cluster = offset + SWAPFILE_CLUSTER;
            else if (offset == last_in_cluster) {
                spin_lock(&swap_lock);
                offset -= SWAPFILE_CLUSTER - 1;
                si->cluster_next = offset;
                si->cluster_nr = SWAPFILE_CLUSTER - 1;
                found_free_cluster = 1;
                goto checks;
            }
            if (unlikely(--latency_ration < 0)) {
                cond_resched();
                latency_ration = LATENCY_LIMIT;
            }
        }

        offset = si->lowest_bit;
        last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

        /* Locate the first empty (unaligned) cluster */
        for (; last_in_cluster < scan_base; offset++) {
            if (si->swap_map[offset])
                last_in_cluster = offset + SWAPFILE_CLUSTER;
            else if (offset == last_in_cluster) {
                spin_lock(&swap_lock);
                offset -= SWAPFILE_CLUSTER - 1;
                si->cluster_next = offset;
                si->cluster_nr = SWAPFILE_CLUSTER - 1;
                found_free_cluster = 1;
                goto checks;
            }
            if (unlikely(--latency_ration < 0)) {
                cond_resched();
                latency_ration = LATENCY_LIMIT;
            }
        }

        offset = scan_base;
        spin_lock(&swap_lock);
        si->cluster_nr = SWAPFILE_CLUSTER - 1;
        si->lowest_alloc = 0;
    }

checks:
    if (!(si->flags & SWP_WRITEOK))
        goto no_page;
    if (!si->highest_bit)
        goto no_page;
    if (offset > si->highest_bit)
        scan_base = offset = si->lowest_bit;

    /* reuse swap entry of cache-only swap if not busy. */
    if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
        int swap_was_freed;
        spin_unlock(&swap_lock);
        swap_was_freed = __try_to_reclaim_swap(si, offset);
        spin_lock(&swap_lock);
        /* entry was freed successfully, try to use this again */
        if (swap_was_freed)
            goto checks;
        goto scan; /* check next one */
    }

    if (si->swap_map[offset])
        goto scan;

    if (offset == si->lowest_bit)
        si->lowest_bit++;
    if (offset == si->highest_bit)
        si->highest_bit--;
    si->inuse_pages++;
    if (si->inuse_pages == si->pages) {
        si->lowest_bit = si->max;
        si->highest_bit = 0;
    }
    si->swap_map[offset] = usage;
    si->cluster_next = offset + 1;
    si->flags -= SWP_SCANNING;

    if (si->lowest_alloc) {
        /*
         * Only set when SWP_DISCARDABLE, and there's a scan
         * for a free cluster in progress or just completed.
         */
        if (found_free_cluster) {
            /*
             * To optimize wear-levelling, discard the
             * old data of the cluster, taking care not to
             * discard any of its pages that have already
             * been allocated by racing tasks (offset has
             * already stepped over any at the beginning).
             */
            if (offset < si->highest_alloc &&
                si->lowest_alloc <= last_in_cluster)
                last_in_cluster = si->lowest_alloc - 1;
            si->flags |= SWP_DISCARDING;
            spin_unlock(&swap_lock);

            if (offset < last_in_cluster)
                discard_swap_cluster(si, offset,
                    last_in_cluster - offset + 1);

            spin_lock(&swap_lock);
            si->lowest_alloc = 0;
            si->flags &= ~SWP_DISCARDING;

            smp_mb(); /* wake_up_bit advises this */
            wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));

        } else if (si->flags & SWP_DISCARDING) {
            /*
             * Delay using pages allocated by racing tasks
             * until the whole discard has been issued. We
             * could defer that delay until swap_writepage,
             * but it's easier to keep this self-contained.
             */
            spin_unlock(&swap_lock);
            wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
                wait_for_discard, TASK_UNINTERRUPTIBLE);
            spin_lock(&swap_lock);
        } else {
            /*
             * Note pages allocated by racing tasks while
             * scan for a free cluster is in progress, so
             * that its final discard can exclude them.
             */
            if (offset < si->lowest_alloc)
                si->lowest_alloc = offset;
            if (offset > si->highest_alloc)
                si->highest_alloc = offset;
        }
    }
    return offset;

scan:
    spin_unlock(&swap_lock);
    while (++offset <= si->highest_bit) {
        if (!si->swap_map[offset]) {
            spin_lock(&swap_lock);
            goto checks;
        }
        if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
            spin_lock(&swap_lock);
            goto checks;
        }
        if (unlikely(--latency_ration < 0)) {
            cond_resched();
            latency_ration = LATENCY_LIMIT;
        }
    }
    offset = si->lowest_bit;
    while (++offset < scan_base) {
        if (!si->swap_map[offset]) {
            spin_lock(&swap_lock);
            goto checks;
        }
        if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
            spin_lock(&swap_lock);
            goto checks;
        }
        if (unlikely(--latency_ration < 0)) {
            cond_resched();
            latency_ration = LATENCY_LIMIT;
        }
    }
    spin_lock(&swap_lock);

no_page:
    si->flags -= SWP_SCANNING;
    return 0;
}

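/*
 * Allocate one swap entry, scanning the highest-priority swap area first
 * and rotating among areas of equal priority. Returns a zero entry when
 * no swap is available.
 */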
swp_entry_t get_swap_page(void)
{
    struct swap_info_struct *si;
    pgoff_t offset;
    int type, next;
    int wrapped = 0;

    spin_lock(&swap_lock);
    if (nr_swap_pages <= 0)
        goto noswap;
    nr_swap_pages--;

    for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
        si = swap_info[type];
        next = si->next;
        if (next < 0 ||
            (!wrapped && si->prio != swap_info[next]->prio)) {
            next = swap_list.head;
            wrapped++;
        }

        if (!si->highest_bit)
            continue;
        if (!(si->flags & SWP_WRITEOK))
            continue;

        swap_list.next = next;
        /* This is called for allocating swap entry for cache */
        offset = scan_swap_map(si, SWAP_HAS_CACHE);
        if (offset) {
            spin_unlock(&swap_lock);
            return swp_entry(type, offset);
        }
        next = swap_list.next;
    }

    nr_swap_pages++;
noswap:
    spin_unlock(&swap_lock);
    return (swp_entry_t) {0};
}

/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
    struct swap_info_struct *si;
    pgoff_t offset;

    spin_lock(&swap_lock);
    si = swap_info[type];
    if (si && (si->flags & SWP_WRITEOK)) {
        nr_swap_pages--;
        /* This is called for allocating swap entry, not cache */
        offset = scan_swap_map(si, 1);
        if (offset) {
            spin_unlock(&swap_lock);
            return swp_entry(type, offset);
        }
        nr_swap_pages++;
    }
    spin_unlock(&swap_lock);
    return (swp_entry_t) {0};
}

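/*
 * Validate a swap entry and return its swap_info_struct with swap_lock
 * held; on any bad or unused entry, print a diagnostic and return NULL.
 */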
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
    struct swap_info_struct *p;
    unsigned long offset, type;

    if (!entry.val)
        goto out;
    type = swp_type(entry);
    if (type >= nr_swapfiles)
        goto bad_nofile;
    p = swap_info[type];
    if (!(p->flags & SWP_USED))
        goto bad_device;
    offset = swp_offset(entry);
    if (offset >= p->max)
        goto bad_offset;
    if (!p->swap_map[offset])
        goto bad_free;
    spin_lock(&swap_lock);
    return p;

bad_free:
    printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
    goto out;
bad_offset:
    printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
    goto out;
bad_device:
    printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
    goto out;
bad_nofile:
    printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
    return NULL;
}

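/*
 * Drop one reference of the kind given by @usage (a map count, or
 * SWAP_HAS_CACHE) and return the resulting swap_map value; when it
 * reaches zero, release the slot back to the swap area.
 */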
static unsigned char swap_entry_free(struct swap_info_struct *p,
                     swp_entry_t entry, unsigned char usage)
{
    unsigned long offset = swp_offset(entry);
    unsigned char count;
    unsigned char has_cache;

    count = p->swap_map[offset];
    has_cache = count & SWAP_HAS_CACHE;
    count &= ~SWAP_HAS_CACHE;

    if (usage == SWAP_HAS_CACHE) {
        VM_BUG_ON(!has_cache);
        has_cache = 0;
    } else if (count == SWAP_MAP_SHMEM) {
        /*
         * Or we could insist on shmem.c using a special
         * swap_shmem_free() and free_shmem_swap_and_cache()...
         */
        count = 0;
    } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
        if (count == COUNT_CONTINUED) {
            if (swap_count_continued(p, offset, count))
                count = SWAP_MAP_MAX | COUNT_CONTINUED;
            else
                count = SWAP_MAP_MAX;
        } else
            count--;
    }

    if (!count)
        mem_cgroup_uncharge_swap(entry);

    usage = count | has_cache;
    p->swap_map[offset] = usage;

    /* free if no reference */
    if (!usage) {
        struct gendisk *disk = p->bdev->bd_disk;
        if (offset < p->lowest_bit)
            p->lowest_bit = offset;
        if (offset > p->highest_bit)
            p->highest_bit = offset;
        if (swap_list.next >= 0 &&
            p->prio > swap_info[swap_list.next]->prio)
            swap_list.next = p->type;
        nr_swap_pages++;
        p->inuse_pages--;
        if ((p->flags & SWP_BLKDEV) &&
                disk->fops->swap_slot_free_notify)
            disk->fops->swap_slot_free_notify(p->bdev, offset);
    }

    return usage;
}

/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
    struct swap_info_struct *p;

    p = swap_info_get(entry);
    if (p) {
        swap_entry_free(p, entry, 1);
        spin_unlock(&swap_lock);
    }
}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
void swapcache_free(swp_entry_t entry, struct page *page)
{
    struct swap_info_struct *p;
    unsigned char count;

    p = swap_info_get(entry);
    if (p) {
        count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
        if (page)
            mem_cgroup_uncharge_swapcache(page, entry, count != 0);
        spin_unlock(&swap_lock);
    }
}

/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
static inline int page_swapcount(struct page *page)
{
    int count = 0;
    struct swap_info_struct *p;
    swp_entry_t entry;

    entry.val = page_private(page);
    p = swap_info_get(entry);
    if (p) {
        count = swap_count(p->swap_map[swp_offset(entry)]);
        spin_unlock(&swap_lock);
    }
    return count;
}

/*
 * We can write to an anon page without COW if there are no other references
 * to it. And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 */
int reuse_swap_page(struct page *page)
{
    int count;

    VM_BUG_ON(!PageLocked(page));
    if (unlikely(PageKsm(page)))
        return 0;
    count = page_mapcount(page);
    if (count <= 1 && PageSwapCache(page)) {
        count += page_swapcount(page);
        if (count == 1 && !PageWriteback(page)) {
            delete_from_swap_cache(page);
            SetPageDirty(page);
        }
    }
    return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
    VM_BUG_ON(!PageLocked(page));

    if (!PageSwapCache(page))
        return 0;
    if (PageWriteback(page))
        return 0;
    if (page_swapcount(page))
        return 0;

    delete_from_swap_cache(page);
    SetPageDirty(page);
    return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
int free_swap_and_cache(swp_entry_t entry)
{
    struct swap_info_struct *p;
    struct page *page = NULL;

    if (non_swap_entry(entry))
        return 1;

    p = swap_info_get(entry);
    if (p) {
        if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
            page = find_get_page(&swapper_space, entry.val);
            if (page && !trylock_page(page)) {
                page_cache_release(page);
                page = NULL;
            }
        }
        spin_unlock(&swap_lock);
    }
    if (page) {
        /*
         * Not mapped elsewhere, or swap space full? Free it!
         * Also recheck PageSwapCache now page is locked (above).
         */
        if (PageSwapCache(page) && !PageWriteback(page) &&
                (!page_mapped(page) || vm_swap_full())) {
            delete_from_swap_cache(page);
            SetPageDirty(page);
        }
        unlock_page(page);
        page_cache_release(page);
    }
    return p != NULL;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_count_swap_user - count the users of a swap entry
 * @ent: the swap entry to be checked
 * @pagep: the pointer for the swap cache page of the entry to be stored
 *
 * Returns the number of users of the swap entry. The number is valid only
 * for swaps of anonymous pages.
 * If the entry is found in the swap cache, the page is stored in *pagep
 * with its refcount incremented.
 */
int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
{
    struct page *page;
    struct swap_info_struct *p;
    int count = 0;

    page = find_get_page(&swapper_space, ent.val);
    if (page)
        count += page_mapcount(page);
    p = swap_info_get(ent);
    if (p) {
        count += swap_count(p->swap_map[swp_offset(ent)]);
        spin_unlock(&swap_lock);
    }

    *pagep = page;
    return count;
}
#endif

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
    struct block_device *bdev = NULL;
    int type;

    if (device)
        bdev = bdget(device);

    spin_lock(&swap_lock);
    for (type = 0; type < nr_swapfiles; type++) {
        struct swap_info_struct *sis = swap_info[type];

        if (!(sis->flags & SWP_WRITEOK))
            continue;

        if (!bdev) {
            if (bdev_p)
                *bdev_p = bdgrab(sis->bdev);

            spin_unlock(&swap_lock);
            return type;
        }
        if (bdev == sis->bdev) {
            struct swap_extent *se = &sis->first_swap_extent;

            if (se->start_block == offset) {
                if (bdev_p)
                    *bdev_p = bdgrab(sis->bdev);

                spin_unlock(&swap_lock);
                bdput(bdev);
                return type;
            }
        }
    }
    spin_unlock(&swap_lock);
    if (bdev)
        bdput(bdev);

    return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
    struct block_device *bdev;

    if ((unsigned int)type >= nr_swapfiles)
        return 0;
    if (!(swap_info[type]->flags & SWP_WRITEOK))
        return 0;
    return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
    unsigned int n = 0;

    spin_lock(&swap_lock);
    if ((unsigned int)type < nr_swapfiles) {
        struct swap_info_struct *sis = swap_info[type];

        if (sis->flags & SWP_WRITEOK) {
            n = sis->pages;
            if (free)
                n -= sis->inuse_pages;
        }
    }
    spin_unlock(&swap_lock);
    return n;
}
#endif /* CONFIG_HIBERNATION */

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        unsigned long addr, swp_entry_t entry, struct page *page)
{
    struct mem_cgroup *ptr = NULL;
    spinlock_t *ptl;
    pte_t *pte;
    int ret = 1;

    if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
        ret = -ENOMEM;
        goto out_nolock;
    }

    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
        if (ret > 0)
            mem_cgroup_cancel_charge_swapin(ptr);
        ret = 0;
        goto out;
    }

    dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
    inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
    get_page(page);
    set_pte_at(vma->vm_mm, addr, pte,
           pte_mkold(mk_pte(page, vma->vm_page_prot)));
    page_add_anon_rmap(page, vma, addr);
    mem_cgroup_commit_charge_swapin(page, ptr);
    swap_free(entry);
    /*
     * Move the page to the active list so it is not
     * immediately swapped out again after swapon.
     */
    activate_page(page);
out:
    pte_unmap_unlock(pte, ptl);
out_nolock:
    return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                swp_entry_t entry, struct page *page)
{
    pte_t swp_pte = swp_entry_to_pte(entry);
    pte_t *pte;
    int ret = 0;

    /*
     * We don't actually need pte lock while scanning for swp_pte: since
     * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
     * page table while we're scanning; though it could get zapped, and on
     * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
     * of unmatched parts which look like swp_pte, so unuse_pte must
     * recheck under pte lock. Scanning without pte lock lets it be
     * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
     */
    pte = pte_offset_map(pmd, addr);
    do {
        /*
         * swapoff spends a _lot_ of time in this loop!
         * Test inline before going to call unuse_pte.
         */
        if (unlikely(pte_same(*pte, swp_pte))) {
            pte_unmap(pte);
            ret = unuse_pte(vma, pmd, addr, entry, page);
            if (ret)
                goto out;
            pte = pte_offset_map(pmd, addr);
        }
    } while (pte++, addr += PAGE_SIZE, addr != end);
    pte_unmap(pte - 1);
out:
    return ret;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end,
                swp_entry_t entry, struct page *page)
{
    pmd_t *pmd;
    unsigned long next;
    int ret;

    pmd = pmd_offset(pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(pmd))
            continue;
        ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
        if (ret)
            return ret;
    } while (pmd++, addr = next, addr != end);
    return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                swp_entry_t entry, struct page *page)
{
    pud_t *pud;
    unsigned long next;
    int ret;

    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud))
            continue;
        ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
        if (ret)
            return ret;
    } while (pud++, addr = next, addr != end);
    return 0;
}

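/*
 * Walk one vma's page tables for the swap entry. For an anon page the
 * scan can be narrowed to the page's single address within this vma.
 */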
static int unuse_vma(struct vm_area_struct *vma,
                swp_entry_t entry, struct page *page)
{
    pgd_t *pgd;
    unsigned long addr, end, next;
    int ret;

    if (page_anon_vma(page)) {
        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
            return 0;
        else
            end = addr + PAGE_SIZE;
    } else {
        addr = vma->vm_start;
        end = vma->vm_end;
    }

    pgd = pgd_offset(vma->vm_mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd))
            continue;
        ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
        if (ret)
            return ret;
    } while (pgd++, addr = next, addr != end);
    return 0;
}

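/*
 * Replace the swap entry throughout one mm. The page comes in locked;
 * if mmap_sem cannot be taken without blocking, the page lock is dropped
 * while we sleep on mmap_sem and then retaken, so we never hold the page
 * lock while waiting for it.
 */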
static int unuse_mm(struct mm_struct *mm,
                swp_entry_t entry, struct page *page)
{
    struct vm_area_struct *vma;
    int ret = 0;

    if (!down_read_trylock(&mm->mmap_sem)) {
        /*
         * Activate page so shrink_inactive_list is unlikely to unmap
         * its ptes while lock is dropped, so swapoff can make progress.
         */
        activate_page(page);
        unlock_page(page);
        down_read(&mm->mmap_sem);
        lock_page(page);
    }
    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
            break;
    }
    up_read(&mm->mmap_sem);
    return (ret < 0)? ret: 0;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
                    unsigned int prev)
{
    unsigned int max = si->max;
    unsigned int i = prev;
    unsigned char count;

    /*
     * No need for swap_lock here: we're just looking
     * for whether an entry is in use, not modifying it; false
     * hits are okay, and sys_swapoff() has already prevented new
     * allocations from this area (while holding swap_lock).
     */
    for (;;) {
        if (++i >= max) {
            if (!prev) {
                i = 0;
                break;
            }
            /*
             * No entries in use at top of swap_map,
             * loop back to start and recheck there.
             */
            max = prev + 1;
            prev = 0;
            i = 1;
        }
        count = si->swap_map[i];
        if (count && swap_count(count) != SWAP_MAP_BAD)
            break;
    }
    return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it. All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
    struct swap_info_struct *si = swap_info[type];
    struct mm_struct *start_mm;
    unsigned char *swap_map;
    unsigned char swcount;
    struct page *page;
    swp_entry_t entry;
    unsigned int i = 0;
    int retval = 0;

    /*
     * When searching mms for an entry, a good strategy is to
     * start at the first mm we freed the previous entry from
     * (though actually we don't notice whether we or coincidence
     * freed the entry). Initialize this start_mm with a hold.
     *
     * A simpler strategy would be to start at the last mm we
     * freed the previous entry from; but that would take less
     * advantage of mmlist ordering, which clusters forked mms
     * together, child after parent. If we race with dup_mmap(), we
     * prefer to resolve parent before child, lest we miss entries
     * duplicated after we scanned child: using last mm would invert
     * that.
     */
    start_mm = &init_mm;
    atomic_inc(&init_mm.mm_users);

    /*
     * Keep on scanning until all entries have gone. Usually,
     * one pass through swap_map is enough, but not necessarily:
     * there are races when an instance of an entry might be missed.
     */
    while ((i = find_next_to_unuse(si, i)) != 0) {
        if (signal_pending(current)) {
            retval = -EINTR;
            break;
        }

        /*
         * Get a page for the entry, using the existing swap
         * cache page if there is one. Otherwise, get a clean
         * page and read the swap into it.
         */
        swap_map = &si->swap_map[i];
        entry = swp_entry(type, i);
        page = read_swap_cache_async(entry,
                    GFP_HIGHUSER_MOVABLE, NULL, 0);
        if (!page) {
            /*
             * Either swap_duplicate() failed because entry
             * has been freed independently, and will not be
             * reused since sys_swapoff() already disabled
             * allocation from here, or alloc_page() failed.
             */
            if (!*swap_map)
                continue;
            retval = -ENOMEM;
            break;
        }

        /*
         * Don't hold on to start_mm if it looks like exiting.
         */
        if (atomic_read(&start_mm->mm_users) == 1) {
            mmput(start_mm);
            start_mm = &init_mm;
            atomic_inc(&init_mm.mm_users);
        }

        /*
         * Wait for and lock page. When do_swap_page races with
         * try_to_unuse, do_swap_page can handle the fault much
         * faster than try_to_unuse can locate the entry. This
         * apparently redundant "wait_on_page_locked" lets try_to_unuse
         * defer to do_swap_page in such a case - in some tests,
         * do_swap_page and try_to_unuse repeatedly compete.
         */
        wait_on_page_locked(page);
        wait_on_page_writeback(page);
        lock_page(page);
        wait_on_page_writeback(page);

        /*
         * Remove all references to entry.
         */
        swcount = *swap_map;
        if (swap_count(swcount) == SWAP_MAP_SHMEM) {
            retval = shmem_unuse(entry, page);
            /* page has already been unlocked and released */
            if (retval < 0)
                break;
            continue;
        }
        if (swap_count(swcount) && start_mm != &init_mm)
            retval = unuse_mm(start_mm, entry, page);

        if (swap_count(*swap_map)) {
            int set_start_mm = (*swap_map >= swcount);
            struct list_head *p = &start_mm->mmlist;
            struct mm_struct *new_start_mm = start_mm;
            struct mm_struct *prev_mm = start_mm;
            struct mm_struct *mm;

            atomic_inc(&new_start_mm->mm_users);
            atomic_inc(&prev_mm->mm_users);
            spin_lock(&mmlist_lock);
            while (swap_count(*swap_map) && !retval &&
                    (p = p->next) != &start_mm->mmlist) {
                mm = list_entry(p, struct mm_struct, mmlist);
                if (!atomic_inc_not_zero(&mm->mm_users))
                    continue;
                spin_unlock(&mmlist_lock);
                mmput(prev_mm);
                prev_mm = mm;

                cond_resched();

                swcount = *swap_map;
                if (!swap_count(swcount)) /* any usage ? */
                    ;
                else if (mm == &init_mm)
                    set_start_mm = 1;
                else
                    retval = unuse_mm(mm, entry, page);

                if (set_start_mm && *swap_map < swcount) {
                    mmput(new_start_mm);
                    atomic_inc(&mm->mm_users);
                    new_start_mm = mm;
                    set_start_mm = 0;
                }
                spin_lock(&mmlist_lock);
            }
            spin_unlock(&mmlist_lock);
            mmput(prev_mm);
            mmput(start_mm);
            start_mm = new_start_mm;
        }
        if (retval) {
            unlock_page(page);
            page_cache_release(page);
            break;
        }

        /*
         * If a reference remains (rare), we would like to leave
         * the page in the swap cache; but try_to_unmap could
         * then re-duplicate the entry once we drop page lock,
         * so we might loop indefinitely; also, that page could
         * not be swapped out to other storage meanwhile. So:
         * delete from cache even if there's another reference,
         * after ensuring that the data has been saved to disk -
         * since if the reference remains (rarer), it will be
         * read from disk into another page. Splitting into two
         * pages would be incorrect if swap supported "shared
         * private" pages, but they are handled by tmpfs files.
         *
         * Given how unuse_vma() targets one particular offset
         * in an anon_vma, once the anon_vma has been determined,
         * this splitting happens to be just what is needed to
         * handle where KSM pages have been swapped out: re-reading
         * is unnecessarily slow, but we can fix that later on.
         */
        if (swap_count(*swap_map) &&
             PageDirty(page) && PageSwapCache(page)) {
            struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
            };

            swap_writepage(page, &wbc);
            lock_page(page);
            wait_on_page_writeback(page);
        }

        /*
         * It is conceivable that a racing task removed this page from
         * swap cache just before we acquired the page lock at the top,
         * or while we dropped it in unuse_mm(). The page might even
         * be back in swap cache on another swap area: that we must not
         * delete, since it may not have been written out to swap yet.
         */
        if (PageSwapCache(page) &&
            likely(page_private(page) == entry.val))
            delete_from_swap_cache(page);

        /*
         * So we could skip searching mms once swap count went
         * to 1, we did not mark any present ptes as dirty: must
         * mark page dirty so shrink_page_list will preserve it.
         */
        SetPageDirty(page);
        unlock_page(page);
        page_cache_release(page);

        /*
         * Make sure that we aren't completely killing
         * interactive performance.
         */
        cond_resched();
    }

    mmput(start_mm);
    return retval;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist. swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after page_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
    struct list_head *p, *next;
    unsigned int type;

    for (type = 0; type < nr_swapfiles; type++)
        if (swap_info[type]->inuse_pages)
            return;
    spin_lock(&mmlist_lock);
    list_for_each_safe(p, next, &init_mm.mmlist)
        list_del_init(p);
    spin_unlock(&mmlist_lock);
}

/*
 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
 * corresponds to page offset for the specified swap entry.
 * Note that the type of this function is sector_t, but it returns page offset
 * into the bdev, not sector offset.
 */
static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
    struct swap_info_struct *sis;
    struct swap_extent *start_se;
    struct swap_extent *se;
    pgoff_t offset;

    sis = swap_info[swp_type(entry)];
    *bdev = sis->bdev;

    offset = swp_offset(entry);
    start_se = sis->curr_swap_extent;
    se = start_se;

    for ( ; ; ) {
        struct list_head *lh;

        if (se->start_page <= offset &&
                offset < (se->start_page + se->nr_pages)) {
            return se->start_block + (offset - se->start_page);
        }
        lh = se->list.next;
        se = list_entry(lh, struct swap_extent, list);
        sis->curr_swap_extent = se;
        BUG_ON(se == start_se); /* It *must* be present */
    }
}

/*
 * Returns the page offset into bdev for the specified page's swap entry.
 */
sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
    swp_entry_t entry;
    entry.val = page_private(page);
    return map_swap_entry(entry, bdev);
}

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
    while (!list_empty(&sis->first_swap_extent.list)) {
        struct swap_extent *se;

        se = list_entry(sis->first_swap_extent.list.next,
                struct swap_extent, list);
        list_del(&se->list);
        kfree(se);
    }
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent list. The extent list is kept sorted in page order.
 *
 * This function rather assumes that it is called in ascending page order.
 */
static int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
        unsigned long nr_pages, sector_t start_block)
{
    struct swap_extent *se;
    struct swap_extent *new_se;
    struct list_head *lh;

    if (start_page == 0) {
        se = &sis->first_swap_extent;
        sis->curr_swap_extent = se;
        se->start_page = 0;
        se->nr_pages = nr_pages;
        se->start_block = start_block;
        return 1;
    } else {
        lh = sis->first_swap_extent.list.prev; /* Highest extent */
        se = list_entry(lh, struct swap_extent, list);
        BUG_ON(se->start_page + se->nr_pages != start_page);
        if (se->start_block + se->nr_pages == start_block) {
            /* Merge it */
            se->nr_pages += nr_pages;
            return 0;
        }
    }

    /*
     * No merge. Insert a new extent, preserving ordering.
     */
    new_se = kmalloc(sizeof(*se), GFP_KERNEL);
    if (new_se == NULL)
        return -ENOMEM;
    new_se->start_page = start_page;
    new_se->nr_pages = nr_pages;
    new_se->start_block = start_block;

    list_add_tail(&new_se->list, &sis->first_swap_extent.list);
    return 1;
}

/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks. An ordered list of swap extents
 * is built at swapon time and is then used at swap_writepage/swap_readpage
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
 * which will scribble on the fs.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
 * extents in the list. To avoid much list walking, we cache the previous
 * search location in `curr_swap_extent', and start new searches from there.
 * This is extremely effective. The average number of iterations in
 * map_swap_page() has been measured at about 0.3 per page. - akpm.
 */
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
    struct inode *inode;
    unsigned blocks_per_page;
    unsigned long page_no;
    unsigned blkbits;
    sector_t probe_block;
    sector_t last_block;
    sector_t lowest_block = -1;
    sector_t highest_block = 0;
    int nr_extents = 0;
    int ret;

    inode = sis->swap_file->f_mapping->host;
    if (S_ISBLK(inode->i_mode)) {
        ret = add_swap_extent(sis, 0, sis->max, 0);
        *span = sis->pages;
        goto out;
    }

    blkbits = inode->i_blkbits;
    blocks_per_page = PAGE_SIZE >> blkbits;

    /*
     * Map all the blocks into the extent list. This code doesn't try
     * to be very smart.
     */
    probe_block = 0;
    page_no = 0;
    last_block = i_size_read(inode) >> blkbits;
    while ((probe_block + blocks_per_page) <= last_block &&
            page_no < sis->max) {
        unsigned block_in_page;
        sector_t first_block;

        first_block = bmap(inode, probe_block);
        if (first_block == 0)
            goto bad_bmap;

        /*
         * It must be PAGE_SIZE aligned on-disk
         */
        if (first_block & (blocks_per_page - 1)) {
            probe_block++;
            goto reprobe;
        }

        for (block_in_page = 1; block_in_page < blocks_per_page;
                    block_in_page++) {
            sector_t block;

            block = bmap(inode, probe_block + block_in_page);
            if (block == 0)
                goto bad_bmap;
            if (block != first_block + block_in_page) {
                /* Discontiguity */
                probe_block++;
                goto reprobe;
            }
        }

        first_block >>= (PAGE_SHIFT - blkbits);
        if (page_no) { /* exclude the header page */
            if (first_block < lowest_block)
                lowest_block = first_block;
            if (first_block > highest_block)
                highest_block = first_block;
        }

        /*
         * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
         */
        ret = add_swap_extent(sis, page_no, 1, first_block);
        if (ret < 0)
            goto out;
        nr_extents += ret;
        page_no++;
        probe_block += blocks_per_page;
reprobe:
        continue;
    }
    ret = nr_extents;
    *span = 1 + highest_block - lowest_block;
    if (page_no == 0)
        page_no = 1; /* force Empty message */
    sis->max = page_no;
    sis->pages = page_no - 1;
    sis->highest_bit = page_no - 1;
out:
    return ret;
bad_bmap:
    printk(KERN_ERR "swapon: swapfile has holes\n");
    ret = -EINVAL;
    goto out;
}

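/*
 * The swapoff system call: unuse every entry of the named swap area,
 * remove it from the swap lists, then tear down its extents and map.
 */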
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
    struct swap_info_struct *p = NULL;
    unsigned char *swap_map;
    struct file *swap_file, *victim;
    struct address_space *mapping;
    struct inode *inode;
    char *pathname;
    int i, type, prev;
    int err;

    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    pathname = getname(specialfile);
    err = PTR_ERR(pathname);
    if (IS_ERR(pathname))
        goto out;

    victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
    putname(pathname);
    err = PTR_ERR(victim);
    if (IS_ERR(victim))
        goto out;

    mapping = victim->f_mapping;
    prev = -1;
    spin_lock(&swap_lock);
    for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
        p = swap_info[type];
        if (p->flags & SWP_WRITEOK) {
            if (p->swap_file->f_mapping == mapping)
                break;
        }
        prev = type;
    }
    if (type < 0) {
        err = -EINVAL;
        spin_unlock(&swap_lock);
        goto out_dput;
    }
    if (!security_vm_enough_memory(p->pages))
        vm_unacct_memory(p->pages);
    else {
        err = -ENOMEM;
        spin_unlock(&swap_lock);
        goto out_dput;
    }
    if (prev < 0)
        swap_list.head = p->next;
    else
        swap_info[prev]->next = p->next;
    if (type == swap_list.next) {
        /* just pick something that's safe... */
        swap_list.next = swap_list.head;
    }
    if (p->prio < 0) {
        for (i = p->next; i >= 0; i = swap_info[i]->next)
            swap_info[i]->prio = p->prio--;
        least_priority++;
    }
    nr_swap_pages -= p->pages;
    total_swap_pages -= p->pages;
    p->flags &= ~SWP_WRITEOK;
    spin_unlock(&swap_lock);

    current->flags |= PF_OOM_ORIGIN;
    err = try_to_unuse(type);
    current->flags &= ~PF_OOM_ORIGIN;

    if (err) {
        /* re-insert swap space back into swap_list */
        spin_lock(&swap_lock);
        if (p->prio < 0)
            p->prio = --least_priority;
        prev = -1;
        for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
            if (p->prio >= swap_info[i]->prio)
                break;
            prev = i;
        }
        p->next = i;
        if (prev < 0)
            swap_list.head = swap_list.next = type;
        else
            swap_info[prev]->next = type;
        nr_swap_pages += p->pages;
        total_swap_pages += p->pages;
        p->flags |= SWP_WRITEOK;
        spin_unlock(&swap_lock);
        goto out_dput;
    }

    /* wait for any unplug function to finish */
    down_write(&swap_unplug_sem);
    up_write(&swap_unplug_sem);

    destroy_swap_extents(p);
    if (p->flags & SWP_CONTINUED)
        free_swap_count_continuations(p);

    mutex_lock(&swapon_mutex);
    spin_lock(&swap_lock);
    drain_mmlist();

    /* wait for anyone still in scan_swap_map */
    p->highest_bit = 0; /* cuts scans short */
    while (p->flags >= SWP_SCANNING) {
        spin_unlock(&swap_lock);
        schedule_timeout_uninterruptible(1);
        spin_lock(&swap_lock);
    }

    swap_file = p->swap_file;
    p->swap_file = NULL;
    p->max = 0;
    swap_map = p->swap_map;
    p->swap_map = NULL;
    p->flags = 0;
    spin_unlock(&swap_lock);
    mutex_unlock(&swapon_mutex);
    vfree(swap_map);
    /* Destroy swap account information */
    swap_cgroup_swapoff(type);

    inode = mapping->host;
    if (S_ISBLK(inode->i_mode)) {
        struct block_device *bdev = I_BDEV(inode);
        set_blocksize(bdev, p->old_block_size);
        bd_release(bdev);
    } else {
        mutex_lock(&inode->i_mutex);
        inode->i_flags &= ~S_SWAPFILE;
        mutex_unlock(&inode->i_mutex);
    }
    filp_close(swap_file, NULL);
    err = 0;

out_dput:
    filp_close(victim, NULL);
out:
    return err;
}
1674
1675#ifdef CONFIG_PROC_FS
1676/* iterator */
1677static void *swap_start(struct seq_file *swap, loff_t *pos)
1678{
1679    struct swap_info_struct *si;
1680    int type;
1681    loff_t l = *pos;
1682
1683    mutex_lock(&swapon_mutex);
1684
1685    if (!l)
1686        return SEQ_START_TOKEN;
1687
1688    for (type = 0; type < nr_swapfiles; type++) {
1689        smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1690        si = swap_info[type];
1691        if (!(si->flags & SWP_USED) || !si->swap_map)
1692            continue;
1693        if (!--l)
1694            return si;
1695    }
1696
1697    return NULL;
1698}
1699
1700static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1701{
1702    struct swap_info_struct *si = v;
1703    int type;
1704
1705    if (v == SEQ_START_TOKEN)
1706        type = 0;
1707    else
1708        type = si->type + 1;
1709
1710    for (; type < nr_swapfiles; type++) {
1711        smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1712        si = swap_info[type];
1713        if (!(si->flags & SWP_USED) || !si->swap_map)
1714            continue;
1715        ++*pos;
1716        return si;
1717    }
1718
1719    return NULL;
1720}
1721
1722static void swap_stop(struct seq_file *swap, void *v)
1723{
1724    mutex_unlock(&swapon_mutex);
1725}
1726
1727static int swap_show(struct seq_file *swap, void *v)
1728{
1729    struct swap_info_struct *si = v;
1730    struct file *file;
1731    int len;
1732
1733    if (si == SEQ_START_TOKEN) {
1734        seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1735        return 0;
1736    }
1737
1738    file = si->swap_file;
1739    len = seq_path(swap, &file->f_path, " \t\n\\");
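        /* pad the path out to 40 columns so the remaining fields line up */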
1740    seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1741            len < 40 ? 40 - len : 1, " ",
1742            S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1743                "partition" : "file\t",
1744            si->pages << (PAGE_SHIFT - 10),
1745            si->inuse_pages << (PAGE_SHIFT - 10),
1746            si->prio);
1747    return 0;
1748}
1749
1750static const struct seq_operations swaps_op = {
1751    .start = swap_start,
1752    .next = swap_next,
1753    .stop = swap_stop,
1754    .show = swap_show
1755};
1756
1757static int swaps_open(struct inode *inode, struct file *file)
1758{
1759    return seq_open(file, &swaps_op);
1760}
1761
1762static const struct file_operations proc_swaps_operations = {
1763    .open = swaps_open,
1764    .read = seq_read,
1765    .llseek = seq_lseek,
1766    .release = seq_release,
1767};
1768
1769static int __init procswaps_init(void)
1770{
1771    proc_create("swaps", 0, NULL, &proc_swaps_operations);
1772    return 0;
1773}
1774__initcall(procswaps_init);
1775#endif /* CONFIG_PROC_FS */
1776
1777#ifdef MAX_SWAPFILES_CHECK
1778static int __init max_swapfiles_check(void)
1779{
1780    MAX_SWAPFILES_CHECK();
1781    return 0;
1782}
1783late_initcall(max_swapfiles_check);
1784#endif
1785
1786/*
1787 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
1788 *
1789 * The swapon system call
1790 */
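/*
 * Typical userspace usage, via the glibc wrapper (the device path here
 * is purely illustrative):
 *     swapon("/dev/sda2", SWAP_FLAG_PREFER | (5 & SWAP_FLAG_PRIO_MASK));
 * activates /dev/sda2 with priority 5; without SWAP_FLAG_PREFER the area
 * would instead get the next auto-assigned (negative) priority.
 */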
1791SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1792{
1793    struct swap_info_struct *p;
1794    char *name = NULL;
1795    struct block_device *bdev = NULL;
1796    struct file *swap_file = NULL;
1797    struct address_space *mapping;
1798    unsigned int type;
1799    int i, prev;
1800    int error;
1801    union swap_header *swap_header;
1802    unsigned int nr_good_pages;
1803    int nr_extents = 0;
1804    sector_t span;
1805    unsigned long maxpages;
1806    unsigned long swapfilepages;
1807    unsigned char *swap_map = NULL;
1808    struct page *page = NULL;
1809    struct inode *inode = NULL;
1810    int did_down = 0;
1811
1812    if (!capable(CAP_SYS_ADMIN))
1813        return -EPERM;
1814
1815    p = kzalloc(sizeof(*p), GFP_KERNEL);
1816    if (!p)
1817        return -ENOMEM;
1818
1819    spin_lock(&swap_lock);
1820    for (type = 0; type < nr_swapfiles; type++) {
1821        if (!(swap_info[type]->flags & SWP_USED))
1822            break;
1823    }
1824    error = -EPERM;
1825    if (type >= MAX_SWAPFILES) {
1826        spin_unlock(&swap_lock);
1827        kfree(p);
1828        goto out;
1829    }
1830    if (type >= nr_swapfiles) {
1831        p->type = type;
1832        swap_info[type] = p;
1833        /*
1834         * Write swap_info[type] before nr_swapfiles, in case a
1835         * racing procfs swap_start() or swap_next() is reading them.
1836         * (We never shrink nr_swapfiles, we never free this entry.)
1837         */
1838        smp_wmb();
1839        nr_swapfiles++;
1840    } else {
1841        kfree(p);
1842        p = swap_info[type];
1843        /*
1844         * Do not memset this entry: a racing procfs swap_next()
1845         * would be relying on p->type to remain valid.
1846         */
1847    }
1848    INIT_LIST_HEAD(&p->first_swap_extent.list);
1849    p->flags = SWP_USED;
1850    p->next = -1;
1851    spin_unlock(&swap_lock);
1852
1853    name = getname(specialfile);
1854    error = PTR_ERR(name);
1855    if (IS_ERR(name)) {
1856        name = NULL;
1857        goto bad_swap_2;
1858    }
1859    swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
1860    error = PTR_ERR(swap_file);
1861    if (IS_ERR(swap_file)) {
1862        swap_file = NULL;
1863        goto bad_swap_2;
1864    }
1865
1866    p->swap_file = swap_file;
1867    mapping = swap_file->f_mapping;
1868    inode = mapping->host;
1869
1870    error = -EBUSY;
1871    for (i = 0; i < nr_swapfiles; i++) {
1872        struct swap_info_struct *q = swap_info[i];
1873
1874        if (i == type || !q->swap_file)
1875            continue;
1876        if (mapping == q->swap_file->f_mapping)
1877            goto bad_swap;
1878    }
1879
1880    error = -EINVAL;
1881    if (S_ISBLK(inode->i_mode)) {
1882        bdev = I_BDEV(inode);
1883        error = bd_claim(bdev, sys_swapon);
1884        if (error < 0) {
1885            bdev = NULL;
1886            error = -EINVAL;
1887            goto bad_swap;
1888        }
1889        p->old_block_size = block_size(bdev);
1890        error = set_blocksize(bdev, PAGE_SIZE);
1891        if (error < 0)
1892            goto bad_swap;
1893        p->bdev = bdev;
1894        p->flags |= SWP_BLKDEV;
1895    } else if (S_ISREG(inode->i_mode)) {
1896        p->bdev = inode->i_sb->s_bdev;
1897        mutex_lock(&inode->i_mutex);
1898        did_down = 1;
1899        if (IS_SWAPFILE(inode)) {
1900            error = -EBUSY;
1901            goto bad_swap;
1902        }
1903    } else {
1904        goto bad_swap;
1905    }
1906
1907    swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1908
1909    /*
1910     * Read the swap header.
1911     */
1912    if (!mapping->a_ops->readpage) {
1913        error = -EINVAL;
1914        goto bad_swap;
1915    }
1916    page = read_mapping_page(mapping, 0, swap_file);
1917    if (IS_ERR(page)) {
1918        error = PTR_ERR(page);
1919        goto bad_swap;
1920    }
1921    swap_header = kmap(page);
1922
1923    if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1924        printk(KERN_ERR "Unable to find swap-space signature\n");
1925        error = -EINVAL;
1926        goto bad_swap;
1927    }
1928
1929    /* swap partition endianness hack... */
1930    if (swab32(swap_header->info.version) == 1) {
1931        swab32s(&swap_header->info.version);
1932        swab32s(&swap_header->info.last_page);
1933        swab32s(&swap_header->info.nr_badpages);
1934        for (i = 0; i < swap_header->info.nr_badpages; i++)
1935            swab32s(&swap_header->info.badpages[i]);
1936    }
1937    /* Check the swap header's sub-version */
1938    if (swap_header->info.version != 1) {
1939        printk(KERN_WARNING
1940               "Unable to handle swap header version %d\n",
1941               swap_header->info.version);
1942        error = -EINVAL;
1943        goto bad_swap;
1944    }
1945
1946    p->lowest_bit = 1;
1947    p->cluster_next = 1;
1948    p->cluster_nr = 0;
1949
1950    /*
1951     * Find out how many pages are allowed for a single swap
1952     * device. There are two limiting factors: 1) the number of
1953     * bits for the swap offset in the swp_entry_t type and
1954     * 2) the number of bits in a swap pte as defined by
1955     * the different architectures. In order to find the
1956     * largest possible bit mask a swap entry with swap type 0
1957     * and swap offset ~0UL is created, encoded to a swap pte,
1958     * decoded to a swp_entry_t again and finally the swap
1959     * offset is extracted. This will mask all the bits from
1960     * the initial ~0UL mask that can't be encoded in either
1961     * the swp_entry_t or the architecture definition of a
1962     * swap pte.
1963     */
1964    maxpages = swp_offset(pte_to_swp_entry(
1965            swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
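    /*
     * Illustrative example: an architecture whose swap pte holds only
     * a 24-bit offset would yield maxpages = 1 << 24 here, i.e. 64GB
     * of usable swap per device with 4k pages.
     */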
1966    if (maxpages > swap_header->info.last_page) {
1967        maxpages = swap_header->info.last_page + 1;
1968        /* p->max is an unsigned int: don't overflow it */
1969        if ((unsigned int)maxpages == 0)
1970            maxpages = UINT_MAX;
1971    }
1972    p->highest_bit = maxpages - 1;
1973
1974    error = -EINVAL;
1975    if (!maxpages)
1976        goto bad_swap;
1977    if (swapfilepages && maxpages > swapfilepages) {
1978        printk(KERN_WARNING
1979               "Swap area shorter than signature indicates\n");
1980        goto bad_swap;
1981    }
1982    if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1983        goto bad_swap;
1984    if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1985        goto bad_swap;
1986
1987    /* OK, set up the swap map and apply the bad block list */
1988    swap_map = vmalloc(maxpages);
1989    if (!swap_map) {
1990        error = -ENOMEM;
1991        goto bad_swap;
1992    }
1993
1994    memset(swap_map, 0, maxpages);
1995    nr_good_pages = maxpages - 1; /* omit header page */
1996
1997    for (i = 0; i < swap_header->info.nr_badpages; i++) {
1998        unsigned int page_nr = swap_header->info.badpages[i];
1999        if (page_nr == 0 || page_nr > swap_header->info.last_page) {
2000            error = -EINVAL;
2001            goto bad_swap;
2002        }
2003        if (page_nr < maxpages) {
2004            swap_map[page_nr] = SWAP_MAP_BAD;
2005            nr_good_pages--;
2006        }
2007    }
2008
2009    error = swap_cgroup_swapon(type, maxpages);
2010    if (error)
2011        goto bad_swap;
2012
2013    if (nr_good_pages) {
2014        swap_map[0] = SWAP_MAP_BAD;
2015        p->max = maxpages;
2016        p->pages = nr_good_pages;
2017        nr_extents = setup_swap_extents(p, &span);
2018        if (nr_extents < 0) {
2019            error = nr_extents;
2020            goto bad_swap;
2021        }
2022        nr_good_pages = p->pages;
2023    }
2024    if (!nr_good_pages) {
2025        printk(KERN_WARNING "Empty swap-file\n");
2026        error = -EINVAL;
2027        goto bad_swap;
2028    }
2029
2030    if (p->bdev) {
2031        if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2032            p->flags |= SWP_SOLIDSTATE;
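            /*
             * Seeks are cheap on non-rotational media: start scanning
             * at a random offset so that allocations (and wear) are
             * spread across the device.
             */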
2033            p->cluster_next = 1 + (random32() % p->highest_bit);
2034        }
2035        if (discard_swap(p) == 0)
2036            p->flags |= SWP_DISCARDABLE;
2037    }
2038
2039    mutex_lock(&swapon_mutex);
2040    spin_lock(&swap_lock);
2041    if (swap_flags & SWAP_FLAG_PREFER)
2042        p->prio =
2043          (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2044    else
2045        p->prio = --least_priority;
2046    p->swap_map = swap_map;
2047    p->flags |= SWP_WRITEOK;
2048    nr_swap_pages += nr_good_pages;
2049    total_swap_pages += nr_good_pages;
2050
2051    printk(KERN_INFO "Adding %uk swap on %s. "
2052            "Priority:%d extents:%d across:%lluk %s%s\n",
2053        nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
2054        nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2055        (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2056        (p->flags & SWP_DISCARDABLE) ? "D" : "");
2057
2058    /* insert swap space into swap_list: */
2059    prev = -1;
2060    for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
2061        if (p->prio >= swap_info[i]->prio)
2062            break;
2063        prev = i;
2064    }
2065    p->next = i;
2066    if (prev < 0)
2067        swap_list.head = swap_list.next = type;
2068    else
2069        swap_info[prev]->next = type;
2070    spin_unlock(&swap_lock);
2071    mutex_unlock(&swapon_mutex);
2072    error = 0;
2073    goto out;
2074bad_swap:
2075    if (bdev) {
2076        set_blocksize(bdev, p->old_block_size);
2077        bd_release(bdev);
2078    }
2079    destroy_swap_extents(p);
2080    swap_cgroup_swapoff(type);
2081bad_swap_2:
2082    spin_lock(&swap_lock);
2083    p->swap_file = NULL;
2084    p->flags = 0;
2085    spin_unlock(&swap_lock);
2086    vfree(swap_map);
2087    if (swap_file)
2088        filp_close(swap_file, NULL);
2089out:
2090    if (page && !IS_ERR(page)) {
2091        kunmap(page);
2092        page_cache_release(page);
2093    }
2094    if (name)
2095        putname(name);
2096    if (did_down) {
2097        if (!error)
2098            inode->i_flags |= S_SWAPFILE;
2099        mutex_unlock(&inode->i_mutex);
2100    }
2101    return error;
2102}
2103
2104void si_swapinfo(struct sysinfo *val)
2105{
2106    unsigned int type;
2107    unsigned long nr_to_be_unused = 0;
2108
2109    spin_lock(&swap_lock);
2110    for (type = 0; type < nr_swapfiles; type++) {
2111        struct swap_info_struct *si = swap_info[type];
2112
2113        if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2114            nr_to_be_unused += si->inuse_pages;
2115    }
2116    val->freeswap = nr_swap_pages + nr_to_be_unused;
2117    val->totalswap = total_swap_pages + nr_to_be_unused;
2118    spin_unlock(&swap_lock);
2119}
2120
2121/*
2122 * Verify that a swap entry is valid and increment its swap map count.
2123 *
2124 * Return values:
2125 * - success -> 0
2126 * - swp_entry is invalid -> EINVAL
2127 * - swp_entry is a migration entry -> EINVAL
2128 * - a swap-cache reference is requested but there is already one -> EEXIST
2129 * - a swap-cache reference is requested but the entry is not used -> ENOENT
2130 * - a swap-mapped reference is requested but needs a continued swap count -> ENOMEM
2131 */
2132static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2133{
2134    struct swap_info_struct *p;
2135    unsigned long offset, type;
2136    unsigned char count;
2137    unsigned char has_cache;
2138    int err = -EINVAL;
2139
2140    if (non_swap_entry(entry))
2141        goto out;
2142
2143    type = swp_type(entry);
2144    if (type >= nr_swapfiles)
2145        goto bad_file;
2146    p = swap_info[type];
2147    offset = swp_offset(entry);
2148
2149    spin_lock(&swap_lock);
2150    if (unlikely(offset >= p->max))
2151        goto unlock_out;
2152
2153    count = p->swap_map[offset];
2154    has_cache = count & SWAP_HAS_CACHE;
2155    count &= ~SWAP_HAS_CACHE;
2156    err = 0;
2157
2158    if (usage == SWAP_HAS_CACHE) {
2159
2160        /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2161        if (!has_cache && count)
2162            has_cache = SWAP_HAS_CACHE;
2163        else if (has_cache) /* someone else added cache */
2164            err = -EEXIST;
2165        else /* no users remaining */
2166            err = -ENOENT;
2167
2168    } else if (count || has_cache) {
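        /*
         * After masking, a value above SWAP_MAP_MAX is one of the
         * special markers (SWAP_MAP_BAD, or SWAP_MAP_SHMEM with its
         * COUNT_CONTINUED bit stripped); those must never be
         * incremented.
         */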
2169
2170        if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2171            count += usage;
2172        else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2173            err = -EINVAL;
2174        else if (swap_count_continued(p, offset, count))
2175            count = COUNT_CONTINUED;
2176        else
2177            err = -ENOMEM;
2178    } else
2179        err = -ENOENT; /* unused swap entry */
2180
2181    p->swap_map[offset] = count | has_cache;
2182
2183unlock_out:
2184    spin_unlock(&swap_lock);
2185out:
2186    return err;
2187
2188bad_file:
2189    printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2190    goto out;
2191}
2192
2193/*
2194 * Help swapoff by noting that the swap entry belongs to shmem/tmpfs
2195 * (in which case its reference count is never incremented).
2196 */
2197void swap_shmem_alloc(swp_entry_t entry)
2198{
2199    __swap_duplicate(entry, SWAP_MAP_SHMEM);
2200}
2201
2202/*
2203 * Increase reference count of swap entry by 1.
2204 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2205 * but could not be atomically allocated. Returns 0, just as if it succeeded,
2206 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2207 * might occur if a page table entry has become corrupted.
2208 */
2209int swap_duplicate(swp_entry_t entry)
2210{
2211    int err = 0;
2212
2213    while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2214        err = add_swap_count_continuation(entry, GFP_ATOMIC);
2215    return err;
2216}
2217
2218/*
2219 * @entry: swap entry for which we allocate swap cache.
2220 *
2221 * Called when allocating swap cache for an existing swap entry.
2222 * This can return error codes; it returns 0 on success.
2223 * -EEXIST means there is already a swap cache.
2224 * Note: the return codes differ from swap_duplicate()'s.
2225 */
2226int swapcache_prepare(swp_entry_t entry)
2227{
2228    return __swap_duplicate(entry, SWAP_HAS_CACHE);
2229}
2230
2231/*
2232 * swap_lock prevents swap_map from being freed. Don't grab an extra
2233 * reference on the swaphandle; it doesn't matter if it becomes unused.
2234 */
2235int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2236{
2237    struct swap_info_struct *si;
2238    int our_page_cluster = page_cluster;
2239    pgoff_t target, toff;
2240    pgoff_t base, end;
2241    int nr_pages = 0;
2242
2243    if (!our_page_cluster) /* no readahead */
2244        return 0;
2245
2246    si = swap_info[swp_type(entry)];
2247    target = swp_offset(entry);
2248    base = (target >> our_page_cluster) << our_page_cluster;
2249    end = base + (1 << our_page_cluster);
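    /*
     * The readahead window is the naturally aligned cluster of
     * 1 << page_cluster slots that contains the target entry.
     */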
2250    if (!base) /* first page is swap header */
2251        base++;
2252
2253    spin_lock(&swap_lock);
2254    if (end > si->max) /* don't go beyond end of map */
2255        end = si->max;
2256
2257    /* Count contiguous allocated slots above our target */
2258    for (toff = target; ++toff < end; nr_pages++) {
2259        /* Don't read in free or bad pages */
2260        if (!si->swap_map[toff])
2261            break;
2262        if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2263            break;
2264    }
2265    /* Count contiguous allocated slots below our target */
2266    for (toff = target; --toff >= base; nr_pages++) {
2267        /* Don't read in free or bad pages */
2268        if (!si->swap_map[toff])
2269            break;
2270        if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2271            break;
2272    }
2273    spin_unlock(&swap_lock);
2274
2275    /*
2276     * Indicate starting offset, and return number of pages to get:
2277     * if only 1, say 0, since there's then no readahead to be done.
2278     */
2279    *offset = ++toff;
2280    return nr_pages ? ++nr_pages : 0;
2281}
2282
2283/*
2284 * add_swap_count_continuation - called when a swap count is duplicated
2285 * beyond SWAP_MAP_MAX: it allocates a new page and links that to the entry's
2286 * page of the original vmalloc'ed swap_map, to hold the continuation count
2287 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
2288 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2289 *
2290 * These continuation pages are seldom referenced: the common paths all work
2291 * on the original swap_map, only referring to a continuation page when the
2292 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2293 *
2294 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2295 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2296 * can be called after dropping locks.
2297 */
2298int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2299{
2300    struct swap_info_struct *si;
2301    struct page *head;
2302    struct page *page;
2303    struct page *list_page;
2304    pgoff_t offset;
2305    unsigned char count;
2306
2307    /*
2308     * When debugging, it's easier to use __GFP_ZERO here; but it's better
2309     * for latency not to zero a page while GFP_ATOMIC and holding locks.
2310     */
2311    page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2312
2313    si = swap_info_get(entry);
2314    if (!si) {
2315        /*
2316         * An acceptable race has occurred since the failing
2317         * __swap_duplicate(): the swap entry has been freed,
2318         * perhaps even the whole swap_map cleared for swapoff.
2319         */
2320        goto outer;
2321    }
2322
2323    offset = swp_offset(entry);
2324    count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2325
2326    if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2327        /*
2328         * The higher the swap count, the more likely it is that tasks
2329         * will race to add swap count continuation: we need to avoid
2330         * over-provisioning.
2331         */
2332        goto out;
2333    }
2334
2335    if (!page) {
2336        spin_unlock(&swap_lock);
2337        return -ENOMEM;
2338    }
2339
2340    /*
2341     * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2342     * no architecture is using highmem pages for kernel pagetables: so it
2343     * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2344     */
2345    head = vmalloc_to_page(si->swap_map + offset);
2346    offset &= ~PAGE_MASK;
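    /*
     * From here on, offset indexes within a single page of the
     * swap_map; every continuation page uses that same byte offset
     * as one extra "digit" of this entry's count.
     */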
2347
2348    /*
2349     * Page allocation does not initialize the page's lru field,
2350     * but it does always reset its private field.
2351     */
2352    if (!page_private(head)) {
2353        BUG_ON(count & COUNT_CONTINUED);
2354        INIT_LIST_HEAD(&head->lru);
2355        set_page_private(head, SWP_CONTINUED);
2356        si->flags |= SWP_CONTINUED;
2357    }
2358
2359    list_for_each_entry(list_page, &head->lru, lru) {
2360        unsigned char *map;
2361
2362        /*
2363         * If the previous map said no continuation, but we've found
2364         * a continuation page, free our allocation and use this one.
2365         */
2366        if (!(count & COUNT_CONTINUED))
2367            goto out;
2368
2369        map = kmap_atomic(list_page, KM_USER0) + offset;
2370        count = *map;
2371        kunmap_atomic(map, KM_USER0);
2372
2373        /*
2374         * If this continuation count now has some space in it,
2375         * free our allocation and use this one.
2376         */
2377        if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2378            goto out;
2379    }
2380
2381    list_add_tail(&page->lru, &head->lru);
2382    page = NULL; /* now it's attached, don't free it */
2383out:
2384    spin_unlock(&swap_lock);
2385outer:
2386    if (page)
2387        __free_page(page);
2388    return 0;
2389}
2390
2391/*
2392 * swap_count_continued - when the original swap_map count is incremented
2393 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2394 * into, carry if so, or else fail until a new continuation page is allocated;
2395 * when the original swap_map count is decremented from 0 with continuation,
2396 * borrow from the continuation and report whether it still holds more.
2397 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2398 */
2399static bool swap_count_continued(struct swap_info_struct *si,
2400                 pgoff_t offset, unsigned char count)
2401{
2402    struct page *head;
2403    struct page *page;
2404    unsigned char *map;
2405
2406    head = vmalloc_to_page(si->swap_map + offset);
2407    if (page_private(head) != SWP_CONTINUED) {
2408        BUG_ON(count & COUNT_CONTINUED);
2409        return false; /* need to add count continuation */
2410    }
2411
2412    offset &= ~PAGE_MASK;
2413    page = list_entry(head->lru.next, struct page, lru);
2414    map = kmap_atomic(page, KM_USER0) + offset;
2415
2416    if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
2417        goto init_map; /* jump over SWAP_CONT_MAX checks */
2418
2419    if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2420        /*
2421         * Think of how you add 1 to 999
2422         */
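        /*
         * Skip over maxed-out digits, increment the first digit with
         * room (zero-initializing it first if it sits in a fresh
         * page), then walk back resetting the skipped digits to plain
         * COUNT_CONTINUED, i.e. zero: just like 999 + 1 = 1000.
         */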
2423        while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2424            kunmap_atomic(map, KM_USER0);
2425            page = list_entry(page->lru.next, struct page, lru);
2426            BUG_ON(page == head);
2427            map = kmap_atomic(page, KM_USER0) + offset;
2428        }
2429        if (*map == SWAP_CONT_MAX) {
2430            kunmap_atomic(map, KM_USER0);
2431            page = list_entry(page->lru.next, struct page, lru);
2432            if (page == head)
2433                return false; /* add count continuation */
2434            map = kmap_atomic(page, KM_USER0) + offset;
2435init_map: *map = 0; /* we didn't zero the page */
2436        }
2437        *map += 1;
2438        kunmap_atomic(map, KM_USER0);
2439        page = list_entry(page->lru.prev, struct page, lru);
2440        while (page != head) {
2441            map = kmap_atomic(page, KM_USER0) + offset;
2442            *map = COUNT_CONTINUED;
2443            kunmap_atomic(map, KM_USER0);
2444            page = list_entry(page->lru.prev, struct page, lru);
2445        }
2446        return true; /* incremented */
2447
2448    } else { /* decrementing */
2449        /*
2450         * Think of how you subtract 1 from 1000
2451         */
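        /*
         * Borrow through the zero digits (bare COUNT_CONTINUED) to
         * reach the first non-zero digit, decrement it, then walk
         * back turning each borrowed zero into SWAP_CONT_MAX: just
         * like 1000 - 1 = 999.
         */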
2452        BUG_ON(count != COUNT_CONTINUED);
2453        while (*map == COUNT_CONTINUED) {
2454            kunmap_atomic(map, KM_USER0);
2455            page = list_entry(page->lru.next, struct page, lru);
2456            BUG_ON(page == head);
2457            map = kmap_atomic(page, KM_USER0) + offset;
2458        }
2459        BUG_ON(*map == 0);
2460        *map -= 1;
2461        if (*map == 0)
2462            count = 0;
2463        kunmap_atomic(map, KM_USER0);
2464        page = list_entry(page->lru.prev, struct page, lru);
2465        while (page != head) {
2466            map = kmap_atomic(page, KM_USER0) + offset;
2467            *map = SWAP_CONT_MAX | count;
2468            count = COUNT_CONTINUED;
2469            kunmap_atomic(map, KM_USER0);
2470            page = list_entry(page->lru.prev, struct page, lru);
2471        }
2472        return count == COUNT_CONTINUED;
2473    }
2474}
2475
2476/*
2477 * free_swap_count_continuations - called by swapoff to free all continuation
2478 * pages appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2479 */
2480static void free_swap_count_continuations(struct swap_info_struct *si)
2481{
2482    pgoff_t offset;
2483
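    /* one potential head page per PAGE_SIZE chunk of the swap_map */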
2484    for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2485        struct page *head;
2486        head = vmalloc_to_page(si->swap_map + offset);
2487        if (page_private(head)) {
2488            struct list_head *this, *next;
2489            list_for_each_safe(this, next, &head->lru) {
2490                struct page *page;
2491                page = list_entry(this, struct page, lru);
2492                list_del(this);
2493                __free_page(page);
2494            }
2495        }
2496    }
2497}
2498
