
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *            Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *            Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *            (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, i.e. the
 * end of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
                    1;
#else
                    2;
#endif

static int __init disable_randmaps(char *s)
{
    randomize_va_space = 0;
    return 1;
}
__setup("norandmaps", disable_randmaps);
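
/*
 * Example: booting with "norandmaps" on the kernel command line runs
 * disable_randmaps() above and sets randomize_va_space to 0, turning
 * address-space randomization off system-wide.
 */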

unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
    zero_pfn = page_to_pfn(ZERO_PAGE(0));
    return 0;
}
core_initcall(init_zero_pfn);


#if defined(SPLIT_RSS_COUNTING)

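/*
 * With SPLIT_RSS_COUNTING, rss updates are first accumulated in the
 * per-task rss_stat cache and folded into mm->rss_stat either
 * explicitly (sync_mm_rss) or once TASK_RSS_EVENTS_THRESH page-fault
 * events have accumulated, trading counter exactness for fewer atomic
 * updates on the shared mm counters.
 */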
static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
{
    int i;

    for (i = 0; i < NR_MM_COUNTERS; i++) {
        if (task->rss_stat.count[i]) {
            add_mm_counter(mm, i, task->rss_stat.count[i]);
            task->rss_stat.count[i] = 0;
        }
    }
    task->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
    struct task_struct *task = current;

    if (likely(task->mm == mm))
        task->rss_stat.count[member] += val;
    else
        add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH (64)
static void check_sync_rss_stat(struct task_struct *task)
{
    if (unlikely(task != current))
        return;
    if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
        __sync_task_rss_stat(task, task->mm);
}

unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
    long val = 0;

    /*
     * Don't use task->mm here, so that we avoid needing task_get_mm().
     * The caller must guarantee task->mm is not invalid.
     */
    val = atomic_long_read(&mm->rss_stat.count[member]);
    /*
     * The counter is updated asynchronously and may temporarily go
     * negative. A negative value is never meaningful to users, so
     * report it as 0.
     */
    if (val < 0)
        return 0;
    return (unsigned long)val;
}

void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
    __sync_task_rss_stat(task, mm);
}
#else

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none. These helpers
 * are called (though very seldom) from the p?d_none_or_clear_bad
 * macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
    pgd_ERROR(*pgd);
    pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
    pud_ERROR(*pud);
    pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
    pmd_ERROR(*pmd);
    pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
               unsigned long addr)
{
    pgtable_t token = pmd_pgtable(*pmd);
    pmd_clear(pmd);
    pte_free_tlb(tlb, token, addr);
    tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                unsigned long addr, unsigned long end,
                unsigned long floor, unsigned long ceiling)
{
    pmd_t *pmd;
    unsigned long next;
    unsigned long start;

    start = addr;
    pmd = pmd_offset(pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(pmd))
            continue;
        free_pte_range(tlb, pmd, addr);
    } while (pmd++, addr = next, addr != end);

    start &= PUD_MASK;
    if (start < floor)
        return;
    if (ceiling) {
        ceiling &= PUD_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        return;

    pmd = pmd_offset(pud, start);
    pud_clear(pud);
    pmd_free_tlb(tlb, pmd, start);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                unsigned long floor, unsigned long ceiling)
{
    pud_t *pud;
    unsigned long next;
    unsigned long start;

    start = addr;
    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud))
            continue;
        free_pmd_range(tlb, pud, addr, next, floor, ceiling);
    } while (pud++, addr = next, addr != end);

    start &= PGDIR_MASK;
    if (start < floor)
        return;
    if (ceiling) {
        ceiling &= PGDIR_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        return;

    pud = pud_offset(pgd, start);
    pgd_clear(pgd);
    pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
            unsigned long addr, unsigned long end,
            unsigned long floor, unsigned long ceiling)
{
    pgd_t *pgd;
    unsigned long next;

    /*
     * The next few lines have given us lots of grief...
     *
     * Why are we testing PMD* at this top level? Because often
     * there will be no work to do at all, and we'd prefer not to
     * go all the way down to the bottom just to discover that.
     *
     * Why all these "- 1"s? Because 0 represents both the bottom
     * of the address space and the top of it (using -1 for the
     * top wouldn't help much: the masks would do the wrong thing).
     * The rule is that addr 0 and floor 0 refer to the bottom of
     * the address space, but end 0 and ceiling 0 refer to the top.
     * Comparisons need to use "end - 1" and "ceiling - 1" (though
     * that end 0 case should be mythical).
     *
     * Wherever addr is brought up or ceiling brought down, we must
     * be careful to reject "the opposite 0" before it confuses the
     * subsequent tests. But what about where end is brought down
     * by PMD_SIZE below? No, end can't go down to 0 there.
     *
     * Whereas we round start (addr) and ceiling down, by different
     * masks at different levels, in order to test whether a table
     * now has no other vmas using it, so can be freed, we don't
     * bother to round floor or end up - the tests don't need that.
     */

    addr &= PMD_MASK;
    if (addr < floor) {
        addr += PMD_SIZE;
        if (!addr)
            return;
    }
    if (ceiling) {
        ceiling &= PMD_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        end -= PMD_SIZE;
    if (addr > end - 1)
        return;

    pgd = pgd_offset(tlb->mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd))
            continue;
        free_pud_range(tlb, pgd, addr, next, floor, ceiling);
    } while (pgd++, addr = next, addr != end);
}
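
/*
 * Worked example of the floor/ceiling convention (a reading aid; the
 * exact call site lives in mm/mmap.c): when tearing down a single vma,
 * the caller passes floor = prev ? prev->vm_end : FIRST_USER_ADDRESS
 * and ceiling = next ? next->vm_start : 0, so that page tables still
 * covering a neighbouring vma are left standing.
 */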

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
        unsigned long floor, unsigned long ceiling)
{
    while (vma) {
        struct vm_area_struct *next = vma->vm_next;
        unsigned long addr = vma->vm_start;

        /*
         * Hide vma from rmap and truncate_pagecache before freeing
         * pgtables
         */
        unlink_anon_vmas(vma);
        unlink_file_vma(vma);

        if (is_vm_hugetlb_page(vma)) {
            hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                floor, next? next->vm_start: ceiling);
        } else {
            /*
             * Optimization: gather nearby vmas into one call down
             */
            while (next && next->vm_start <= vma->vm_end + PMD_SIZE
                   && !is_vm_hugetlb_page(next)) {
                vma = next;
                next = vma->vm_next;
                unlink_anon_vmas(vma);
                unlink_file_vma(vma);
            }
            free_pgd_range(tlb, addr, vma->vm_end,
                floor, next? next->vm_start: ceiling);
        }
        vma = next;
    }
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
    pgtable_t new = pte_alloc_one(mm, address);
    if (!new)
        return -ENOMEM;

    /*
     * Ensure all pte setup (eg. pte page lock and page clearing) are
     * visible before the pte is made visible to other CPUs by being
     * put into page tables.
     *
     * The other side of the story is the pointer chasing in the page
     * table walking code (when walking the page table without locking;
     * ie. most of the time). Fortunately, these data accesses consist
     * of a chain of data-dependent loads, meaning most CPUs (alpha
     * being the notable exception) will already guarantee loads are
     * seen in-order. See the alpha page table accessors for the
     * smp_read_barrier_depends() barriers in page table walking code.
     */
    smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

    spin_lock(&mm->page_table_lock);
    if (!pmd_present(*pmd)) { /* Has another populated it ? */
        mm->nr_ptes++;
        pmd_populate(mm, pmd, new);
        new = NULL;
    }
    spin_unlock(&mm->page_table_lock);
    if (new)
        pte_free(mm, new);
    return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
    pte_t *new = pte_alloc_one_kernel(&init_mm, address);
    if (!new)
        return -ENOMEM;

    smp_wmb(); /* See comment in __pte_alloc */

    spin_lock(&init_mm.page_table_lock);
    if (!pmd_present(*pmd)) { /* Has another populated it ? */
        pmd_populate_kernel(&init_mm, pmd, new);
        new = NULL;
    }
    spin_unlock(&init_mm.page_table_lock);
    if (new)
        pte_free_kernel(&init_mm, new);
    return 0;
}

static inline void init_rss_vec(int *rss)
{
    memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
    int i;

    if (current->mm == mm)
        sync_mm_rss(current, mm);
    for (i = 0; i < NR_MM_COUNTERS; i++)
        if (rss[i])
            add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
              pte_t pte, struct page *page)
{
    pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
    pud_t *pud = pud_offset(pgd, addr);
    pmd_t *pmd = pmd_offset(pud, addr);
    struct address_space *mapping;
    pgoff_t index;
    static unsigned long resume;
    static unsigned long nr_shown;
    static unsigned long nr_unshown;

    /*
     * Allow a burst of 60 reports, then keep quiet for that minute;
     * or allow a steady drip of one report per second.
     */
    if (nr_shown == 60) {
        if (time_before(jiffies, resume)) {
            nr_unshown++;
            return;
        }
        if (nr_unshown) {
            printk(KERN_ALERT
                "BUG: Bad page map: %lu messages suppressed\n",
                nr_unshown);
            nr_unshown = 0;
        }
        nr_shown = 0;
    }
    if (nr_shown++ == 0)
        resume = jiffies + 60 * HZ;

    mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
    index = linear_page_index(vma, addr);

    printk(KERN_ALERT
        "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
        current->comm,
        (long long)pte_val(pte), (long long)pmd_val(*pmd));
    if (page)
        dump_page(page);
    printk(KERN_ALERT
        "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
        (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
    /*
     * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
     */
    if (vma->vm_ops)
        print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
                (unsigned long)vma->vm_ops->fault);
    if (vma->vm_file && vma->vm_file->f_op)
        print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
                (unsigned long)vma->vm_file->f_op->mmap);
    dump_stack();
    add_taint(TAINT_BAD_PAGE);
}

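/*
 * A mapping is "COW" when it is private (VM_SHARED clear) yet at least
 * potentially writable (VM_MAYWRITE set): writes must then be served
 * by copying the page instead of modifying the shared original.
 */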
static inline int is_cow_mapping(unsigned int flags)
{
    return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

#ifndef is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
    return pfn == zero_pfn;
}
#endif

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
    return zero_pfn;
}
#endif

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                pte_t pte)
{
    unsigned long pfn = pte_pfn(pte);

    if (HAVE_PTE_SPECIAL) {
        if (likely(!pte_special(pte)))
            goto check_pfn;
        if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
            return NULL;
        if (!is_zero_pfn(pfn))
            print_bad_pte(vma, addr, pte, NULL);
        return NULL;
    }

    /* !HAVE_PTE_SPECIAL case follows: */

    if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
        if (vma->vm_flags & VM_MIXEDMAP) {
            if (!pfn_valid(pfn))
                return NULL;
            goto out;
        } else {
            unsigned long off;
            off = (addr - vma->vm_start) >> PAGE_SHIFT;
            if (pfn == vma->vm_pgoff + off)
                return NULL;
            if (!is_cow_mapping(vma->vm_flags))
                return NULL;
        }
    }

    if (is_zero_pfn(pfn))
        return NULL;
check_pfn:
    if (unlikely(pfn > highest_memmap_pfn)) {
        print_bad_pte(vma, addr, pte, NULL);
        return NULL;
    }

    /*
     * NOTE! We still have PageReserved() pages in the page tables.
     * eg. VDSO mappings can cause them to exist.
     */
out:
    return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
        unsigned long addr, int *rss)
{
    unsigned long vm_flags = vma->vm_flags;
    pte_t pte = *src_pte;
    struct page *page;

    /* pte contains position in swap or file, so copy. */
    if (unlikely(!pte_present(pte))) {
        if (!pte_file(pte)) {
            swp_entry_t entry = pte_to_swp_entry(pte);

            if (swap_duplicate(entry) < 0)
                return entry.val;

            /* make sure dst_mm is on swapoff's mmlist. */
            if (unlikely(list_empty(&dst_mm->mmlist))) {
                spin_lock(&mmlist_lock);
                if (list_empty(&dst_mm->mmlist))
                    list_add(&dst_mm->mmlist,
                         &src_mm->mmlist);
                spin_unlock(&mmlist_lock);
            }
            if (likely(!non_swap_entry(entry)))
                rss[MM_SWAPENTS]++;
            else if (is_write_migration_entry(entry) &&
                    is_cow_mapping(vm_flags)) {
                /*
                 * COW mappings require pages in both parent
                 * and child to be set to read.
                 */
                make_migration_entry_read(&entry);
                pte = swp_entry_to_pte(entry);
                set_pte_at(src_mm, addr, src_pte, pte);
            }
        }
        goto out_set_pte;
    }

    /*
     * If it's a COW mapping, write protect it both
     * in the parent and the child
     */
    if (is_cow_mapping(vm_flags)) {
        ptep_set_wrprotect(src_mm, addr, src_pte);
        pte = pte_wrprotect(pte);
    }

    /*
     * If it's a shared mapping, mark it clean in
     * the child
     */
    if (vm_flags & VM_SHARED)
        pte = pte_mkclean(pte);
    pte = pte_mkold(pte);

    page = vm_normal_page(vma, addr, pte);
    if (page) {
        get_page(page);
        page_dup_rmap(page);
        if (PageAnon(page))
            rss[MM_ANONPAGES]++;
        else
            rss[MM_FILEPAGES]++;
    }

out_set_pte:
    set_pte_at(dst_mm, addr, dst_pte, pte);
    return 0;
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
        unsigned long addr, unsigned long end)
{
    pte_t *orig_src_pte, *orig_dst_pte;
    pte_t *src_pte, *dst_pte;
    spinlock_t *src_ptl, *dst_ptl;
    int progress = 0;
    int rss[NR_MM_COUNTERS];
    swp_entry_t entry = (swp_entry_t){0};

again:
    init_rss_vec(rss);

    dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
    if (!dst_pte)
        return -ENOMEM;
    src_pte = pte_offset_map_nested(src_pmd, addr);
    src_ptl = pte_lockptr(src_mm, src_pmd);
    spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
    orig_src_pte = src_pte;
    orig_dst_pte = dst_pte;
    arch_enter_lazy_mmu_mode();

    do {
        /*
         * We are holding two locks at this point - either of them
         * could generate latencies in another task on another CPU.
         */
        if (progress >= 32) {
            progress = 0;
            if (need_resched() ||
                spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                break;
        }
        if (pte_none(*src_pte)) {
            progress++;
            continue;
        }
        entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
                            vma, addr, rss);
        if (entry.val)
            break;
        progress += 8;
    } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

    arch_leave_lazy_mmu_mode();
    spin_unlock(src_ptl);
    pte_unmap_nested(orig_src_pte);
    add_mm_rss_vec(dst_mm, rss);
    pte_unmap_unlock(orig_dst_pte, dst_ptl);
    cond_resched();

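    /*
     * A nonzero entry.val means copy_one_pte() hit a swap entry whose
     * swap count needs a continuation page: allocate it now, with both
     * pte locks dropped, then retry the rest of the range from "again".
     */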
    if (entry.val) {
        if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
            return -ENOMEM;
        progress = 0;
    }
    if (addr != end)
        goto again;
    return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
        unsigned long addr, unsigned long end)
{
    pmd_t *src_pmd, *dst_pmd;
    unsigned long next;

    dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
    if (!dst_pmd)
        return -ENOMEM;
    src_pmd = pmd_offset(src_pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(src_pmd))
            continue;
        if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
                        vma, addr, next))
            return -ENOMEM;
    } while (dst_pmd++, src_pmd++, addr = next, addr != end);
    return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
        unsigned long addr, unsigned long end)
{
    pud_t *src_pud, *dst_pud;
    unsigned long next;

    dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
    if (!dst_pud)
        return -ENOMEM;
    src_pud = pud_offset(src_pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(src_pud))
            continue;
        if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
                        vma, addr, next))
            return -ENOMEM;
    } while (dst_pud++, src_pud++, addr = next, addr != end);
    return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        struct vm_area_struct *vma)
{
    pgd_t *src_pgd, *dst_pgd;
    unsigned long next;
    unsigned long addr = vma->vm_start;
    unsigned long end = vma->vm_end;
    int ret;

    /*
     * Don't copy ptes where a page fault will fill them correctly.
     * Fork becomes much lighter when there are big shared or private
     * readonly mappings. The tradeoff is that, for the ptes which do
     * get used, copy_page_range is more efficient than filling them
     * in through later faults.
     */
    if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
        if (!vma->anon_vma)
            return 0;
    }

    if (is_vm_hugetlb_page(vma))
        return copy_hugetlb_page_range(dst_mm, src_mm, vma);

    if (unlikely(is_pfn_mapping(vma))) {
        /*
         * We do not free on error cases below as remove_vma
         * gets called on error from higher level routine
         */
        ret = track_pfn_vma_copy(vma);
        if (ret)
            return ret;
    }

    /*
     * We need to invalidate the secondary MMU mappings only when
     * there could be a permission downgrade on the ptes of the
     * parent mm. And a permission downgrade will only happen if
     * is_cow_mapping() returns true.
     */
    if (is_cow_mapping(vma->vm_flags))
        mmu_notifier_invalidate_range_start(src_mm, addr, end);

    ret = 0;
    dst_pgd = pgd_offset(dst_mm, addr);
    src_pgd = pgd_offset(src_mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(src_pgd))
            continue;
        if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
                        vma, addr, next))) {
            ret = -ENOMEM;
            break;
        }
    } while (dst_pgd++, src_pgd++, addr = next, addr != end);

    if (is_cow_mapping(vma->vm_flags))
        mmu_notifier_invalidate_range_end(src_mm,
                          vma->vm_start, end);
    return ret;
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
                struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                long *zap_work, struct zap_details *details)
{
    struct mm_struct *mm = tlb->mm;
    pte_t *pte;
    spinlock_t *ptl;
    int rss[NR_MM_COUNTERS];

    init_rss_vec(rss);

    pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    arch_enter_lazy_mmu_mode();
    do {
        pte_t ptent = *pte;
        if (pte_none(ptent)) {
            (*zap_work)--;
            continue;
        }

        (*zap_work) -= PAGE_SIZE;

        if (pte_present(ptent)) {
            struct page *page;

            page = vm_normal_page(vma, addr, ptent);
            if (unlikely(details) && page) {
                /*
                 * unmap_shared_mapping_pages() wants to
                 * invalidate cache without truncating:
                 * unmap shared but keep private pages.
                 */
                if (details->check_mapping &&
                    details->check_mapping != page->mapping)
                    continue;
                /*
                 * Each page->index must be checked when
                 * invalidating or truncating nonlinear.
                 */
                if (details->nonlinear_vma &&
                    (page->index < details->first_index ||
                     page->index > details->last_index))
                    continue;
            }
            ptent = ptep_get_and_clear_full(mm, addr, pte,
                            tlb->fullmm);
            tlb_remove_tlb_entry(tlb, pte, addr);
            if (unlikely(!page))
                continue;
            if (unlikely(details) && details->nonlinear_vma
                && linear_page_index(details->nonlinear_vma,
                        addr) != page->index)
                set_pte_at(mm, addr, pte,
                       pgoff_to_pte(page->index));
            if (PageAnon(page))
                rss[MM_ANONPAGES]--;
            else {
                if (pte_dirty(ptent))
                    set_page_dirty(page);
                if (pte_young(ptent) &&
                    likely(!VM_SequentialReadHint(vma)))
                    mark_page_accessed(page);
                rss[MM_FILEPAGES]--;
            }
            page_remove_rmap(page);
            if (unlikely(page_mapcount(page) < 0))
                print_bad_pte(vma, addr, ptent, page);
            tlb_remove_page(tlb, page);
            continue;
        }
        /*
         * If details->check_mapping, we leave swap entries;
         * if details->nonlinear_vma, we leave file entries.
         */
        if (unlikely(details))
            continue;
        if (pte_file(ptent)) {
            if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
                print_bad_pte(vma, addr, ptent, NULL);
        } else {
            swp_entry_t entry = pte_to_swp_entry(ptent);

            if (!non_swap_entry(entry))
                rss[MM_SWAPENTS]--;
            if (unlikely(!free_swap_and_cache(entry)))
                print_bad_pte(vma, addr, ptent, NULL);
        }
        pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
    } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

    add_mm_rss_vec(mm, rss);
    arch_leave_lazy_mmu_mode();
    pte_unmap_unlock(pte - 1, ptl);

    return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end,
                long *zap_work, struct zap_details *details)
{
    pmd_t *pmd;
    unsigned long next;

    pmd = pmd_offset(pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(pmd)) {
            (*zap_work)--;
            continue;
        }
        next = zap_pte_range(tlb, vma, pmd, addr, next,
                        zap_work, details);
    } while (pmd++, addr = next, (addr != end && *zap_work > 0));

    return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                long *zap_work, struct zap_details *details)
{
    pud_t *pud;
    unsigned long next;

    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud)) {
            (*zap_work)--;
            continue;
        }
        next = zap_pmd_range(tlb, vma, pud, addr, next,
                        zap_work, details);
    } while (pud++, addr = next, (addr != end && *zap_work > 0));

    return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
                struct vm_area_struct *vma,
                unsigned long addr, unsigned long end,
                long *zap_work, struct zap_details *details)
{
    pgd_t *pgd;
    unsigned long next;

    if (details && !details->check_mapping && !details->nonlinear_vma)
        details = NULL;

    BUG_ON(addr >= end);
    mem_cgroup_uncharge_start();
    tlb_start_vma(tlb, vma);
    pgd = pgd_offset(vma->vm_mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd)) {
            (*zap_work)--;
            continue;
        }
        next = zap_pud_range(tlb, vma, pgd, addr, next,
                        zap_work, details);
    } while (pgd++, addr = next, (addr != end && *zap_work > 0));
    tlb_end_vma(tlb, vma);
    mem_cgroup_uncharge_end();

    return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
#endif

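/*
 * Illustrative arithmetic, assuming 4KB pages: each zap batch covers
 * 32KB (8 * 4KB) with CONFIG_PREEMPT, and 4MB (1024 * 4KB) without.
 */
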
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE byte chunks. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
        struct vm_area_struct *vma, unsigned long start_addr,
        unsigned long end_addr, unsigned long *nr_accounted,
        struct zap_details *details)
{
    long zap_work = ZAP_BLOCK_SIZE;
    unsigned long tlb_start = 0; /* For tlb_finish_mmu */
    int tlb_start_valid = 0;
    unsigned long start = start_addr;
    spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
    int fullmm = (*tlbp)->fullmm;
    struct mm_struct *mm = vma->vm_mm;

    mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
    for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
        unsigned long end;

        start = max(vma->vm_start, start_addr);
        if (start >= vma->vm_end)
            continue;
        end = min(vma->vm_end, end_addr);
        if (end <= vma->vm_start)
            continue;

        if (vma->vm_flags & VM_ACCOUNT)
            *nr_accounted += (end - start) >> PAGE_SHIFT;

        if (unlikely(is_pfn_mapping(vma)))
            untrack_pfn_vma(vma, 0, 0);

        while (start != end) {
            if (!tlb_start_valid) {
                tlb_start = start;
                tlb_start_valid = 1;
            }

            if (unlikely(is_vm_hugetlb_page(vma))) {
                /*
                 * It is undesirable to test vma->vm_file as it
                 * should be non-null for valid hugetlb area.
                 * However, vm_file will be NULL in the error
                 * cleanup path of do_mmap_pgoff. When
                 * hugetlbfs ->mmap method fails,
                 * do_mmap_pgoff() nullifies vma->vm_file
                 * before calling this function to clean up.
                 * Since no pte has actually been setup, it is
                 * safe to do nothing in this case.
                 */
                if (vma->vm_file) {
                    unmap_hugepage_range(vma, start, end, NULL);
                    zap_work -= (end - start) /
                        pages_per_huge_page(hstate_vma(vma));
                }

                start = end;
            } else
                start = unmap_page_range(*tlbp, vma,
                        start, end, &zap_work, details);

            if (zap_work > 0) {
                BUG_ON(start != end);
                break;
            }

            tlb_finish_mmu(*tlbp, tlb_start, start);

            if (need_resched() ||
                (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
                if (i_mmap_lock) {
                    *tlbp = NULL;
                    goto out;
                }
                cond_resched();
            }

            *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
            tlb_start_valid = 0;
            zap_work = ZAP_BLOCK_SIZE;
        }
    }
out:
    mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
    return start; /* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        unsigned long size, struct zap_details *details)
{
    struct mm_struct *mm = vma->vm_mm;
    struct mmu_gather *tlb;
    unsigned long end = address + size;
    unsigned long nr_accounted = 0;

    lru_add_drain();
    tlb = tlb_gather_mmu(mm, 0);
    update_hiwater_rss(mm);
    end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
    if (tlb)
        tlb_finish_mmu(tlb, address, end);
    return end;
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
        unsigned long size)
{
    if (address < vma->vm_start || address + size > vma->vm_end ||
                !(vma->vm_flags & VM_PFNMAP))
        return -1;
    zap_page_range(vma, address, size, NULL);
    return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
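
/*
 * Minimal usage sketch for zap_vma_ptes() (illustrative only: "vma" is
 * assumed to be a VM_PFNMAP vma the caller itself set up, typically
 * with mmap_sem held):
 *
 *	if (zap_vma_ptes(vma, vma->vm_start,
 *			 vma->vm_end - vma->vm_start))
 *		printk(KERN_WARNING "failed to unmap ptes\n");
 */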

/**
 * follow_page - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
            unsigned int flags)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *ptep, pte;
    spinlock_t *ptl;
    struct page *page;
    struct mm_struct *mm = vma->vm_mm;

    page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
    if (!IS_ERR(page)) {
        BUG_ON(flags & FOLL_GET);
        goto out;
    }

    page = NULL;
    pgd = pgd_offset(mm, address);
    if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
        goto no_page_table;

    pud = pud_offset(pgd, address);
    if (pud_none(*pud))
        goto no_page_table;
    if (pud_huge(*pud)) {
        BUG_ON(flags & FOLL_GET);
        page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
        goto out;
    }
    if (unlikely(pud_bad(*pud)))
        goto no_page_table;

    pmd = pmd_offset(pud, address);
    if (pmd_none(*pmd))
        goto no_page_table;
    if (pmd_huge(*pmd)) {
        BUG_ON(flags & FOLL_GET);
        page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
        goto out;
    }
    if (unlikely(pmd_bad(*pmd)))
        goto no_page_table;

    ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

    pte = *ptep;
    if (!pte_present(pte))
        goto no_page;
    if ((flags & FOLL_WRITE) && !pte_write(pte))
        goto unlock;

    page = vm_normal_page(vma, address, pte);
    if (unlikely(!page)) {
        if ((flags & FOLL_DUMP) ||
            !is_zero_pfn(pte_pfn(pte)))
            goto bad_page;
        page = pte_page(pte);
    }

    if (flags & FOLL_GET)
        get_page(page);
    if (flags & FOLL_TOUCH) {
        if ((flags & FOLL_WRITE) &&
            !pte_dirty(pte) && !PageDirty(page))
            set_page_dirty(page);
        /*
         * pte_mkyoung() would be more correct here, but atomic care
         * is needed to avoid losing the dirty bit: it is easier to use
         * mark_page_accessed().
         */
        mark_page_accessed(page);
    }
unlock:
    pte_unmap_unlock(ptep, ptl);
out:
    return page;

bad_page:
    pte_unmap_unlock(ptep, ptl);
    return ERR_PTR(-EFAULT);

no_page:
    pte_unmap_unlock(ptep, ptl);
    if (!pte_none(pte))
        return page;

no_page_table:
    /*
     * When core dumping an enormous anonymous area that nobody
     * has touched so far, we don't want to allocate unnecessary pages or
     * page tables. Return error instead of NULL to skip handle_mm_fault,
     * then get_dump_page() will return NULL to leave a hole in the dump.
     * But we can only make this optimization where a hole would surely
     * be zero-filled if handle_mm_fault() actually did handle it.
     */
    if ((flags & FOLL_DUMP) &&
        (!vma->vm_ops || !vma->vm_ops->fault))
        return ERR_PTR(-EFAULT);
    return page;
}

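/*
 * __get_user_pages() is the worker for get_user_pages(): the caller
 * must already hold mm->mmap_sem. It walks the vma list from @start,
 * faulting pages in through handle_mm_fault() as needed, and returns
 * the number of pages processed, or -errno if none could be.
 */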
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
             unsigned long start, int nr_pages, unsigned int gup_flags,
             struct page **pages, struct vm_area_struct **vmas)
{
    int i;
    unsigned long vm_flags;

    if (nr_pages <= 0)
        return 0;

    VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

    /*
     * Require read or write permissions.
     * If FOLL_FORCE is set, we only require the "MAY" flags.
     */
    vm_flags = (gup_flags & FOLL_WRITE) ?
            (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
    vm_flags &= (gup_flags & FOLL_FORCE) ?
            (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
    i = 0;

    do {
        struct vm_area_struct *vma;

        vma = find_extend_vma(mm, start);
        if (!vma && in_gate_area(tsk, start)) {
            unsigned long pg = start & PAGE_MASK;
            struct vm_area_struct *gate_vma = get_gate_vma(tsk);
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;

            /* user gate pages are read-only */
            if (gup_flags & FOLL_WRITE)
                return i ? : -EFAULT;
            if (pg > TASK_SIZE)
                pgd = pgd_offset_k(pg);
            else
                pgd = pgd_offset_gate(mm, pg);
            BUG_ON(pgd_none(*pgd));
            pud = pud_offset(pgd, pg);
            BUG_ON(pud_none(*pud));
            pmd = pmd_offset(pud, pg);
            if (pmd_none(*pmd))
                return i ? : -EFAULT;
            pte = pte_offset_map(pmd, pg);
            if (pte_none(*pte)) {
                pte_unmap(pte);
                return i ? : -EFAULT;
            }
            if (pages) {
                struct page *page;

                page = vm_normal_page(gate_vma, start, *pte);
                if (!page) {
                    if (!(gup_flags & FOLL_DUMP) &&
                         is_zero_pfn(pte_pfn(*pte)))
                        page = pte_page(*pte);
                    else {
                        pte_unmap(pte);
                        return i ? : -EFAULT;
                    }
                }
                pages[i] = page;
                get_page(page);
            }
            pte_unmap(pte);
            if (vmas)
                vmas[i] = gate_vma;
            i++;
            start += PAGE_SIZE;
            nr_pages--;
            continue;
        }

        if (!vma ||
            (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
            !(vm_flags & vma->vm_flags))
            return i ? : -EFAULT;

        if (is_vm_hugetlb_page(vma)) {
            i = follow_hugetlb_page(mm, vma, pages, vmas,
                    &start, &nr_pages, i, gup_flags);
            continue;
        }

        do {
            struct page *page;
            unsigned int foll_flags = gup_flags;

            /*
             * If we have a pending SIGKILL, don't keep faulting
             * pages and potentially allocating memory.
             */
            if (unlikely(fatal_signal_pending(current)))
                return i ? i : -ERESTARTSYS;

            cond_resched();
            while (!(page = follow_page(vma, start, foll_flags))) {
                int ret;

                ret = handle_mm_fault(mm, vma, start,
                    (foll_flags & FOLL_WRITE) ?
                    FAULT_FLAG_WRITE : 0);

                if (ret & VM_FAULT_ERROR) {
                    if (ret & VM_FAULT_OOM)
                        return i ? i : -ENOMEM;
                    if (ret &
                        (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
                        return i ? i : -EFAULT;
                    BUG();
                }
                if (ret & VM_FAULT_MAJOR)
                    tsk->maj_flt++;
                else
                    tsk->min_flt++;

                /*
                 * The VM_FAULT_WRITE bit tells us that
                 * do_wp_page has broken COW when necessary,
                 * even if maybe_mkwrite decided not to set
                 * pte_write. We can thus safely do subsequent
                 * page lookups as if they were reads. But only
                 * do so when looping for pte_write is futile:
                 * in some cases userspace may also be wanting
                 * to write to the gotten user page, which a
                 * read fault here might prevent (a readonly
                 * page might get reCOWed by userspace write).
                 */
                if ((ret & VM_FAULT_WRITE) &&
                    !(vma->vm_flags & VM_WRITE))
                    foll_flags &= ~FOLL_WRITE;

                cond_resched();
            }
            if (IS_ERR(page))
                return i ? i : PTR_ERR(page);
            if (pages) {
                pages[i] = page;

                flush_anon_page(vma, page, start);
                flush_dcache_page(page);
            }
            if (vmas)
                vmas[i] = vma;
            i++;
            start += PAGE_SIZE;
            nr_pages--;
        } while (nr_pages && start < vma->vm_end);
    } while (nr_pages);
    return i;
}

/**
 * get_user_pages() - pin user pages in memory
 * @tsk: task_struct of target task
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to by the caller
 * @force: whether to force write access even if user mapping is
 * readonly. This will result in the page being COWed even
 * in MAP_SHARED mappings. You do not want this.
 * @pages: array that receives pointers to the pages pinned.
 * Should be at least nr_pages long. Or NULL, if caller
 * only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 * Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        unsigned long start, int nr_pages, int write, int force,
        struct page **pages, struct vm_area_struct **vmas)
{
    int flags = FOLL_TOUCH;

    if (pages)
        flags |= FOLL_GET;
    if (write)
        flags |= FOLL_WRITE;
    if (force)
        flags |= FOLL_FORCE;

    return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
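
/*
 * Minimal usage sketch for get_user_pages() (illustrative only; "uaddr"
 * and "buf" are hypothetical caller-supplied variables):
 *
 *	struct page *page;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     1, 0, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		void *kaddr = kmap(page);
 *		memcpy(buf, kaddr, PAGE_SIZE);
 *		kunmap(page);
 *		put_page(page);
 *	}
 */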

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
    struct vm_area_struct *vma;
    struct page *page;

    if (__get_user_pages(current, current->mm, addr, 1,
            FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
        return NULL;
    flush_cache_page(vma, addr, page_to_pfn(page));
    return page;
}
#endif /* CONFIG_ELF_CORE */

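/*
 * Walk (allocating where necessary) the page-table levels down to the
 * pte for @addr, returning the mapped pte with its page-table lock
 * held in *@ptl, or NULL if a level could not be allocated.
 */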
1593pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1594            spinlock_t **ptl)
1595{
1596    pgd_t * pgd = pgd_offset(mm, addr);
1597    pud_t * pud = pud_alloc(mm, pgd, addr);
1598    if (pud) {
1599        pmd_t * pmd = pmd_alloc(mm, pud, addr);
1600        if (pmd)
1601            return pte_alloc_map_lock(mm, pmd, addr, ptl);
1602    }
1603    return NULL;
1604}
1605
1606/*
1607 * This is the old fallback for page remapping.
1608 *
1609 * For historical reasons, it only allows reserved pages. Only
1610 * old drivers should use this, and they needed to mark their
1611 * pages reserved for the old functions anyway.
1612 */
1613static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1614            struct page *page, pgprot_t prot)
1615{
1616    struct mm_struct *mm = vma->vm_mm;
1617    int retval;
1618    pte_t *pte;
1619    spinlock_t *ptl;
1620
1621    retval = -EINVAL;
1622    if (PageAnon(page))
1623        goto out;
1624    retval = -ENOMEM;
1625    flush_dcache_page(page);
1626    pte = get_locked_pte(mm, addr, &ptl);
1627    if (!pte)
1628        goto out;
1629    retval = -EBUSY;
1630    if (!pte_none(*pte))
1631        goto out_unlock;
1632
1633    /* Ok, finally just insert the thing.. */
1634    get_page(page);
1635    inc_mm_counter_fast(mm, MM_FILEPAGES);
1636    page_add_file_rmap(page);
1637    set_pte_at(mm, addr, pte, mk_pte(page, prot));
1638
1639    retval = 0;
1640    pte_unmap_unlock(pte, ptl);
1641    return retval;
1642out_unlock:
1643    pte_unmap_unlock(pte, ptl);
1644out:
1645    return retval;
1646}
1647
1648/**
1649 * vm_insert_page - insert single page into user vma
1650 * @vma: user vma to map to
1651 * @addr: target user address of this page
1652 * @page: source kernel page
1653 *
1654 * This allows drivers to insert individual pages they've allocated
1655 * into a user vma.
1656 *
1657 * The page has to be a nice clean _individual_ kernel allocation.
1658 * If you allocate a compound page, you need to have marked it as
1659 * such (__GFP_COMP), or manually just split the page up yourself
1660 * (see split_page()).
1661 *
1662 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1663 * took an arbitrary page protection parameter. This doesn't allow
1664 * that. Your vma protection will have to be set up correctly, which
1665 * means that if you want a shared writable mapping, you'd better
1666 * ask for a shared writable mapping!
1667 *
1668 * The page does not need to be reserved.
1669 */
1670int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1671            struct page *page)
1672{
1673    if (addr < vma->vm_start || addr >= vma->vm_end)
1674        return -EFAULT;
1675    if (!page_count(page))
1676        return -EINVAL;
1677    vma->vm_flags |= VM_INSERTPAGE;
1678    return insert_page(vma, addr, page, vma->vm_page_prot);
1679}
1680EXPORT_SYMBOL(vm_insert_page);
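
/*
 * Example (illustrative sketch): a driver ->mmap() method exposing a
 * single alloc_page()ed buffer to userspace via vm_insert_page(). The
 * use of file->private_data to carry the page is hypothetical.
 */
static int example_single_page_mmap(struct file *file,
				    struct vm_area_struct *vma)
{
	struct page *page = file->private_data;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE || vma->vm_pgoff)
		return -EINVAL;
	/* vma->vm_page_prot already reflects the mmap() prot and flags */
	return vm_insert_page(vma, vma->vm_start, page);
}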
1681
1682static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1683            unsigned long pfn, pgprot_t prot)
1684{
1685    struct mm_struct *mm = vma->vm_mm;
1686    int retval;
1687    pte_t *pte, entry;
1688    spinlock_t *ptl;
1689
1690    retval = -ENOMEM;
1691    pte = get_locked_pte(mm, addr, &ptl);
1692    if (!pte)
1693        goto out;
1694    retval = -EBUSY;
1695    if (!pte_none(*pte))
1696        goto out_unlock;
1697
1698    /* Ok, finally just insert the thing.. */
1699    entry = pte_mkspecial(pfn_pte(pfn, prot));
1700    set_pte_at(mm, addr, pte, entry);
1701    update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1702
1703    retval = 0;
1704out_unlock:
1705    pte_unmap_unlock(pte, ptl);
1706out:
1707    return retval;
1708}
1709
1710/**
1711 * vm_insert_pfn - insert single pfn into user vma
1712 * @vma: user vma to map to
1713 * @addr: target user address of this page
1714 * @pfn: source kernel pfn
1715 *
1716 * Similar to vm_insert_page, this allows drivers to insert individual pages
1717 * they've allocated into a user vma. Same comments apply.
1718 *
1719 * This function should only be called from a vm_ops->fault handler, and
1720 * in that case the handler should return VM_FAULT_NOPAGE on success.
1721 *
1722 * vma cannot be a COW mapping.
1723 *
1724 * As this is called only for pages that do not currently exist, we
1725 * do not need to flush old virtual caches or the TLB.
1726 */
1727int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1728            unsigned long pfn)
1729{
1730    int ret;
1731    pgprot_t pgprot = vma->vm_page_prot;
1732    /*
1733     * Technically, architectures with pte_special can avoid all these
1734     * restrictions (same for remap_pfn_range). However we would like
1735     * consistency in testing and feature parity among all, so we should
1736     * try to keep these invariants in place for everybody.
1737     */
1738    BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1739    BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1740                        (VM_PFNMAP|VM_MIXEDMAP));
1741    BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1742    BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1743
1744    if (addr < vma->vm_start || addr >= vma->vm_end)
1745        return -EFAULT;
1746    if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
1747        return -EINVAL;
1748
1749    ret = insert_pfn(vma, addr, pfn, pgprot);
1750
1751    if (ret)
1752        untrack_pfn_vma(vma, pfn, PAGE_SIZE);
1753
1754    return ret;
1755}
1756EXPORT_SYMBOL(vm_insert_pfn);
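
/*
 * Example (illustrative sketch): a ->fault handler for a VM_PFNMAP vma
 * backed by a device aperture. The driver's ->mmap() is assumed to have
 * set VM_PFNMAP and vm_private_data; struct example_dev and its
 * base_pfn field are hypothetical.
 */
struct example_dev {
	unsigned long base_pfn;
};

static int example_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct example_dev *dev = vma->vm_private_data;
	unsigned long pfn = dev->base_pfn + vmf->pgoff;
	int err;

	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -EBUSY means a racing fault already installed the pte: fine */
	return VM_FAULT_NOPAGE;
}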
1757
1758int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1759            unsigned long pfn)
1760{
1761    BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1762
1763    if (addr < vma->vm_start || addr >= vma->vm_end)
1764        return -EFAULT;
1765
1766    /*
1767     * If we don't have pte special, then we have to use the pfn_valid()
1768     * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1769     * refcount the page if pfn_valid is true (hence insert_page rather
1770     * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
1771 * without pte special, it would then be refcounted as a normal page.
1772     */
1773    if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
1774        struct page *page;
1775
1776        page = pfn_to_page(pfn);
1777        return insert_page(vma, addr, page, vma->vm_page_prot);
1778    }
1779    return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
1780}
1781EXPORT_SYMBOL(vm_insert_mixed);
1782
1783/*
1784 * Maps a range of physical memory into the requested pages. The old
1785 * mappings are removed. Any references to nonexistent pages result
1786 * in null mappings (currently treated as "copy-on-access").
1787 */
1788static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1789            unsigned long addr, unsigned long end,
1790            unsigned long pfn, pgprot_t prot)
1791{
1792    pte_t *pte;
1793    spinlock_t *ptl;
1794
1795    pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1796    if (!pte)
1797        return -ENOMEM;
1798    arch_enter_lazy_mmu_mode();
1799    do {
1800        BUG_ON(!pte_none(*pte));
1801        set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1802        pfn++;
1803    } while (pte++, addr += PAGE_SIZE, addr != end);
1804    arch_leave_lazy_mmu_mode();
1805    pte_unmap_unlock(pte - 1, ptl);
1806    return 0;
1807}
1808
1809static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1810            unsigned long addr, unsigned long end,
1811            unsigned long pfn, pgprot_t prot)
1812{
1813    pmd_t *pmd;
1814    unsigned long next;
1815
1816    pfn -= addr >> PAGE_SHIFT;
1817    pmd = pmd_alloc(mm, pud, addr);
1818    if (!pmd)
1819        return -ENOMEM;
1820    do {
1821        next = pmd_addr_end(addr, end);
1822        if (remap_pte_range(mm, pmd, addr, next,
1823                pfn + (addr >> PAGE_SHIFT), prot))
1824            return -ENOMEM;
1825    } while (pmd++, addr = next, addr != end);
1826    return 0;
1827}
1828
1829static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1830            unsigned long addr, unsigned long end,
1831            unsigned long pfn, pgprot_t prot)
1832{
1833    pud_t *pud;
1834    unsigned long next;
1835
1836    pfn -= addr >> PAGE_SHIFT;
1837    pud = pud_alloc(mm, pgd, addr);
1838    if (!pud)
1839        return -ENOMEM;
1840    do {
1841        next = pud_addr_end(addr, end);
1842        if (remap_pmd_range(mm, pud, addr, next,
1843                pfn + (addr >> PAGE_SHIFT), prot))
1844            return -ENOMEM;
1845    } while (pud++, addr = next, addr != end);
1846    return 0;
1847}
1848
1849/**
1850 * remap_pfn_range - remap kernel memory to userspace
1851 * @vma: user vma to map to
1852 * @addr: target user address to start at
1853 * @pfn: physical address of kernel memory
1854 * @size: size of map area
1855 * @prot: page protection flags for this mapping
1856 *
1857 * Note: this is only safe if the mm semaphore is held when called.
1858 */
1859int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1860            unsigned long pfn, unsigned long size, pgprot_t prot)
1861{
1862    pgd_t *pgd;
1863    unsigned long next;
1864    unsigned long end = addr + PAGE_ALIGN(size);
1865    struct mm_struct *mm = vma->vm_mm;
1866    int err;
1867
1868    /*
1869     * Physically remapped pages are special. Tell the
1870     * rest of the world about it:
1871     * VM_IO tells people not to look at these pages
1872     * (accesses can have side effects).
1873     * VM_RESERVED is specified all over the place, because
1874     * in 2.4 it kept swapout's vma scan off this vma; but
1875     * in 2.6 the LRU scan won't even find its pages, so this
1876 * flag means no more than counting its pages in reserved_vm,
1877 * and omitting it from the core dump, even when VM_IO is turned off.
1878     * VM_PFNMAP tells the core MM that the base pages are just
1879     * raw PFN mappings, and do not have a "struct page" associated
1880     * with them.
1881     *
1882     * There's a horrible special case to handle copy-on-write
1883     * behaviour that some programs depend on. We mark the "original"
1884     * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1885     */
1886    if (addr == vma->vm_start && end == vma->vm_end) {
1887        vma->vm_pgoff = pfn;
1888        vma->vm_flags |= VM_PFN_AT_MMAP;
1889    } else if (is_cow_mapping(vma->vm_flags))
1890        return -EINVAL;
1891
1892    vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1893
1894    err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
1895    if (err) {
1896        /*
1897         * To indicate that track_pfn related cleanup is not
1898         * needed from higher level routine calling unmap_vmas
1899         */
1900        vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
1901        vma->vm_flags &= ~VM_PFN_AT_MMAP;
1902        return -EINVAL;
1903    }
1904
1905    BUG_ON(addr >= end);
1906    pfn -= addr >> PAGE_SHIFT;
1907    pgd = pgd_offset(mm, addr);
1908    flush_cache_range(vma, addr, end);
1909    do {
1910        next = pgd_addr_end(addr, end);
1911        err = remap_pud_range(mm, pgd, addr, next,
1912                pfn + (addr >> PAGE_SHIFT), prot);
1913        if (err)
1914            break;
1915    } while (pgd++, addr = next, addr != end);
1916
1917    if (err)
1918        untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
1919
1920    return err;
1921}
1922EXPORT_SYMBOL(remap_pfn_range);
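
/*
 * Example (illustrative sketch): the usual way a character driver's
 * ->mmap() hands a physical region to userspace in one go; mmap_sem is
 * already held by the mmap() path. struct example_chardev and its
 * phys_base/region_size fields are hypothetical.
 */
struct example_chardev {
	phys_addr_t phys_base;
	unsigned long region_size;
};

static int example_region_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_chardev *dev = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > dev->region_size)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}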
1923
1924static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1925                     unsigned long addr, unsigned long end,
1926                     pte_fn_t fn, void *data)
1927{
1928    pte_t *pte;
1929    int err;
1930    pgtable_t token;
1931    spinlock_t *uninitialized_var(ptl);
1932
1933    pte = (mm == &init_mm) ?
1934        pte_alloc_kernel(pmd, addr) :
1935        pte_alloc_map_lock(mm, pmd, addr, &ptl);
1936    if (!pte)
1937        return -ENOMEM;
1938
1939    BUG_ON(pmd_huge(*pmd));
1940
1941    arch_enter_lazy_mmu_mode();
1942
1943    token = pmd_pgtable(*pmd);
1944
1945    do {
1946        err = fn(pte++, token, addr, data);
1947        if (err)
1948            break;
1949    } while (addr += PAGE_SIZE, addr != end);
1950
1951    arch_leave_lazy_mmu_mode();
1952
1953    if (mm != &init_mm)
1954        pte_unmap_unlock(pte-1, ptl);
1955    return err;
1956}
1957
1958static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1959                     unsigned long addr, unsigned long end,
1960                     pte_fn_t fn, void *data)
1961{
1962    pmd_t *pmd;
1963    unsigned long next;
1964    int err;
1965
1966    BUG_ON(pud_huge(*pud));
1967
1968    pmd = pmd_alloc(mm, pud, addr);
1969    if (!pmd)
1970        return -ENOMEM;
1971    do {
1972        next = pmd_addr_end(addr, end);
1973        err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
1974        if (err)
1975            break;
1976    } while (pmd++, addr = next, addr != end);
1977    return err;
1978}
1979
1980static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1981                     unsigned long addr, unsigned long end,
1982                     pte_fn_t fn, void *data)
1983{
1984    pud_t *pud;
1985    unsigned long next;
1986    int err;
1987
1988    pud = pud_alloc(mm, pgd, addr);
1989    if (!pud)
1990        return -ENOMEM;
1991    do {
1992        next = pud_addr_end(addr, end);
1993        err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
1994        if (err)
1995            break;
1996    } while (pud++, addr = next, addr != end);
1997    return err;
1998}
1999
2000/*
2001 * Scan a region of virtual memory, filling in page tables as necessary
2002 * and calling a provided function on each leaf page table.
2003 */
2004int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2005            unsigned long size, pte_fn_t fn, void *data)
2006{
2007    pgd_t *pgd;
2008    unsigned long next;
2009    unsigned long end = addr + size;
2010    int err;
2011
2012    BUG_ON(addr >= end);
2013    pgd = pgd_offset(mm, addr);
2014    do {
2015        next = pgd_addr_end(addr, end);
2016        err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
2017        if (err)
2018            break;
2019    } while (pgd++, addr = next, addr != end);
2020
2021    return err;
2022}
2023EXPORT_SYMBOL_GPL(apply_to_page_range);
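
/*
 * Example (illustrative sketch): using apply_to_page_range() to count
 * the present ptes in a range of kernel address space; paravirt guests
 * use the same pattern to retag or pin page table entries. The
 * example_* names are hypothetical.
 */
static int example_count_pte(pte_t *pte, pgtable_t token,
			     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* a non-zero return would abort the walk */
}

static unsigned long example_count_present(unsigned long start,
					   unsigned long size)
{
	unsigned long count = 0;

	/* errors (page-table allocation failures) are ignored here */
	apply_to_page_range(&init_mm, start, size, example_count_pte, &count);
	return count;
}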
2024
2025/*
2026 * handle_pte_fault chooses page fault handler according to an entry
2027 * which was read non-atomically. Before making any commitment, on
2028 * those architectures or configurations (e.g. i386 with PAE) which
2029 * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
2030 * must check under lock before unmapping the pte and proceeding
2031 * (but do_wp_page is only called after already making such a check;
2032 * and do_anonymous_page and __do_fault can safely check later on).
2033 */
2034static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2035                pte_t *page_table, pte_t orig_pte)
2036{
2037    int same = 1;
2038#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2039    if (sizeof(pte_t) > sizeof(unsigned long)) {
2040        spinlock_t *ptl = pte_lockptr(mm, pmd);
2041        spin_lock(ptl);
2042        same = pte_same(*page_table, orig_pte);
2043        spin_unlock(ptl);
2044    }
2045#endif
2046    pte_unmap(page_table);
2047    return same;
2048}
2049
2050/*
2051 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
2052 * servicing faults for write access. In the normal case, we always want
2053 * pte_mkwrite. But get_user_pages can cause write faults for mappings
2054 * that do not have writing enabled, when used by access_process_vm.
2055 */
2056static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
2057{
2058    if (likely(vma->vm_flags & VM_WRITE))
2059        pte = pte_mkwrite(pte);
2060    return pte;
2061}
2062
2063static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
2064{
2065    /*
2066     * If the source page was a PFN mapping, we don't have
2067     * a "struct page" for it. We do a best-effort copy by
2068     * just copying from the original user address. If that
2069     * fails, we just zero-fill it. Live with it.
2070     */
2071    if (unlikely(!src)) {
2072        void *kaddr = kmap_atomic(dst, KM_USER0);
2073        void __user *uaddr = (void __user *)(va & PAGE_MASK);
2074
2075        /*
2076         * This really shouldn't fail, because the page is there
2077         * in the page tables. But it might just be unreadable,
2078         * in which case we just give up and fill the result with
2079         * zeroes.
2080         */
2081        if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
2082            memset(kaddr, 0, PAGE_SIZE);
2083        kunmap_atomic(kaddr, KM_USER0);
2084        flush_dcache_page(dst);
2085    } else
2086        copy_user_highpage(dst, src, va, vma);
2087}
2088
2089/*
2090 * This routine handles present pages, when users try to write
2091 * to a shared page. It is done by copying the page to a new address
2092 * and decrementing the shared-page counter for the old page.
2093 *
2094 * Note that this routine assumes that the protection checks have been
2095 * done by the caller (the low-level page fault routine in most cases).
2096 * Thus we can safely just mark it writable once we've done any necessary
2097 * COW.
2098 *
2099 * We also mark the page dirty at this point even though the page will
2100 * change only once the write actually happens. This avoids a few races,
2101 * and potentially makes it more efficient.
2102 *
2103 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2104 * but allow concurrent faults), with pte both mapped and locked.
2105 * We return with mmap_sem still held, but pte unmapped and unlocked.
2106 */
2107static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2108        unsigned long address, pte_t *page_table, pmd_t *pmd,
2109        spinlock_t *ptl, pte_t orig_pte)
2110{
2111    struct page *old_page, *new_page;
2112    pte_t entry;
2113    int reuse = 0, ret = 0;
2114    int page_mkwrite = 0;
2115    struct page *dirty_page = NULL;
2116
2117    old_page = vm_normal_page(vma, address, orig_pte);
2118    if (!old_page) {
2119        /*
2120         * VM_MIXEDMAP !pfn_valid() case
2121         *
2122         * We should not cow pages in a shared writeable mapping.
2123         * Just mark the pages writable as we can't do any dirty
2124         * accounting on raw pfn maps.
2125         */
2126        if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2127                     (VM_WRITE|VM_SHARED))
2128            goto reuse;
2129        goto gotten;
2130    }
2131
2132    /*
2133     * Take out anonymous pages first, anonymous shared vmas are
2134     * not dirty accountable.
2135     */
2136    if (PageAnon(old_page) && !PageKsm(old_page)) {
2137        if (!trylock_page(old_page)) {
2138            page_cache_get(old_page);
2139            pte_unmap_unlock(page_table, ptl);
2140            lock_page(old_page);
2141            page_table = pte_offset_map_lock(mm, pmd, address,
2142                             &ptl);
2143            if (!pte_same(*page_table, orig_pte)) {
2144                unlock_page(old_page);
2145                page_cache_release(old_page);
2146                goto unlock;
2147            }
2148            page_cache_release(old_page);
2149        }
2150        reuse = reuse_swap_page(old_page);
2151        if (reuse)
2152            /*
2153             * The page is all ours. Move it to our anon_vma so
2154             * the rmap code will not search our parent or siblings.
2155             * Protected against the rmap code by the page lock.
2156             */
2157            page_move_anon_rmap(old_page, vma, address);
2158        unlock_page(old_page);
2159    } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2160                    (VM_WRITE|VM_SHARED))) {
2161        /*
2162         * Only catch write-faults on shared writable pages,
2163         * read-only shared pages can get COWed by
2164         * get_user_pages(.write=1, .force=1).
2165         */
2166        if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2167            struct vm_fault vmf;
2168            int tmp;
2169
2170            vmf.virtual_address = (void __user *)(address &
2171                                PAGE_MASK);
2172            vmf.pgoff = old_page->index;
2173            vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2174            vmf.page = old_page;
2175
2176            /*
2177             * Notify the address space that the page is about to
2178             * become writable so that it can prohibit this or wait
2179             * for the page to get into an appropriate state.
2180             *
2181             * We do this without the lock held, so that it can
2182             * sleep if it needs to.
2183             */
2184            page_cache_get(old_page);
2185            pte_unmap_unlock(page_table, ptl);
2186
2187            tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2188            if (unlikely(tmp &
2189                    (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2190                ret = tmp;
2191                goto unwritable_page;
2192            }
2193            if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
2194                lock_page(old_page);
2195                if (!old_page->mapping) {
2196                    ret = 0; /* retry the fault */
2197                    unlock_page(old_page);
2198                    goto unwritable_page;
2199                }
2200            } else
2201                VM_BUG_ON(!PageLocked(old_page));
2202
2203            /*
2204             * Since we dropped the lock we need to revalidate
2205             * the PTE as someone else may have changed it. If
2206             * they did, we just return, as we can count on the
2207             * MMU to tell us if they didn't also make it writable.
2208             */
2209            page_table = pte_offset_map_lock(mm, pmd, address,
2210                             &ptl);
2211            if (!pte_same(*page_table, orig_pte)) {
2212                unlock_page(old_page);
2213                page_cache_release(old_page);
2214                goto unlock;
2215            }
2216
2217            page_mkwrite = 1;
2218        }
2219        dirty_page = old_page;
2220        get_page(dirty_page);
2221        reuse = 1;
2222    }
2223
2224    if (reuse) {
2225reuse:
2226        flush_cache_page(vma, address, pte_pfn(orig_pte));
2227        entry = pte_mkyoung(orig_pte);
2228        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2229        if (ptep_set_access_flags(vma, address, page_table, entry,1))
2230            update_mmu_cache(vma, address, page_table);
2231        ret |= VM_FAULT_WRITE;
2232        goto unlock;
2233    }
2234
2235    /*
2236     * Ok, we need to copy. Oh, well..
2237     */
2238    page_cache_get(old_page);
2239gotten:
2240    pte_unmap_unlock(page_table, ptl);
2241
2242    if (unlikely(anon_vma_prepare(vma)))
2243        goto oom;
2244
2245    if (is_zero_pfn(pte_pfn(orig_pte))) {
2246        new_page = alloc_zeroed_user_highpage_movable(vma, address);
2247        if (!new_page)
2248            goto oom;
2249    } else {
2250        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2251        if (!new_page)
2252            goto oom;
2253        cow_user_page(new_page, old_page, address, vma);
2254    }
2255    __SetPageUptodate(new_page);
2256
2257    /*
2258     * Don't let another task, with possibly unlocked vma,
2259     * keep the mlocked page.
2260     */
2261    if ((vma->vm_flags & VM_LOCKED) && old_page) {
2262        lock_page(old_page); /* for LRU manipulation */
2263        clear_page_mlock(old_page);
2264        unlock_page(old_page);
2265    }
2266
2267    if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
2268        goto oom_free_new;
2269
2270    /*
2271     * Re-check the pte - we dropped the lock
2272     */
2273    page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2274    if (likely(pte_same(*page_table, orig_pte))) {
2275        if (old_page) {
2276            if (!PageAnon(old_page)) {
2277                dec_mm_counter_fast(mm, MM_FILEPAGES);
2278                inc_mm_counter_fast(mm, MM_ANONPAGES);
2279            }
2280        } else
2281            inc_mm_counter_fast(mm, MM_ANONPAGES);
2282        flush_cache_page(vma, address, pte_pfn(orig_pte));
2283        entry = mk_pte(new_page, vma->vm_page_prot);
2284        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2285        /*
2286         * Clear the pte entry and flush it first, before updating the
2287         * pte with the new entry. This will avoid a race condition
2288         * seen in the presence of one thread doing SMC and another
2289         * thread doing COW.
2290         */
2291        ptep_clear_flush(vma, address, page_table);
2292        page_add_new_anon_rmap(new_page, vma, address);
2293        /*
2294         * We call the notify macro here because, when using secondary
2295         * mmu page tables (such as kvm shadow page tables), we want the
2296         * new page to be mapped directly into the secondary page table.
2297         */
2298        set_pte_at_notify(mm, address, page_table, entry);
2299        update_mmu_cache(vma, address, page_table);
2300        if (old_page) {
2301            /*
2302             * Only after switching the pte to the new page may
2303             * we remove the mapcount here. Otherwise another
2304             * process may come and find the rmap count decremented
2305             * before the pte is switched to the new page, and
2306             * "reuse" the old page writing into it while our pte
2307             * here still points into it and can be read by other
2308             * threads.
2309             *
2310             * The critical issue is to order this
2311 * page_remove_rmap with the ptep_clear_flush above.
2312             * Those stores are ordered by (if nothing else,)
2313             * the barrier present in the atomic_add_negative
2314             * in page_remove_rmap.
2315             *
2316             * Then the TLB flush in ptep_clear_flush ensures that
2317             * no process can access the old page before the
2318             * decremented mapcount is visible. And the old page
2319             * cannot be reused until after the decremented
2320             * mapcount is visible. So transitively, TLBs to
2321             * old page will be flushed before it can be reused.
2322             */
2323            page_remove_rmap(old_page);
2324        }
2325
2326        /* Free the old page.. */
2327        new_page = old_page;
2328        ret |= VM_FAULT_WRITE;
2329    } else
2330        mem_cgroup_uncharge_page(new_page);
2331
2332    if (new_page)
2333        page_cache_release(new_page);
2334    if (old_page)
2335        page_cache_release(old_page);
2336unlock:
2337    pte_unmap_unlock(page_table, ptl);
2338    if (dirty_page) {
2339        /*
2340         * Yes, Virginia, this is actually required to prevent a race
2341         * with clear_page_dirty_for_io() from clearing the page dirty
2342 * bit after it clears all dirty ptes, but before a racing
2343         * do_wp_page installs a dirty pte.
2344         *
2345         * do_no_page is protected similarly.
2346         */
2347        if (!page_mkwrite) {
2348            wait_on_page_locked(dirty_page);
2349            set_page_dirty_balance(dirty_page, page_mkwrite);
2350        }
2351        put_page(dirty_page);
2352        if (page_mkwrite) {
2353            struct address_space *mapping = dirty_page->mapping;
2354
2355            set_page_dirty(dirty_page);
2356            unlock_page(dirty_page);
2357            page_cache_release(dirty_page);
2358            if (mapping) {
2359                /*
2360                 * Some device drivers do not set page.mapping
2361                 * but still dirty their pages
2362                 */
2363                balance_dirty_pages_ratelimited(mapping);
2364            }
2365        }
2366
2367        /* file_update_time outside page_lock */
2368        if (vma->vm_file)
2369            file_update_time(vma->vm_file);
2370    }
2371    return ret;
2372oom_free_new:
2373    page_cache_release(new_page);
2374oom:
2375    if (old_page) {
2376        if (page_mkwrite) {
2377            unlock_page(old_page);
2378            page_cache_release(old_page);
2379        }
2380        page_cache_release(old_page);
2381    }
2382    return VM_FAULT_OOM;
2383
2384unwritable_page:
2385    page_cache_release(old_page);
2386    return ret;
2387}
2388
2389/*
2390 * Helper functions for unmap_mapping_range().
2391 *
2392 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
2393 *
2394 * We have to restart searching the prio_tree whenever we drop the lock,
2395 * since the iterator is only valid while the lock is held, and anyway
2396 * a later vma might be split and reinserted earlier while lock dropped.
2397 *
2398 * The list of nonlinear vmas could be handled more efficiently, using
2399 * a placeholder, but handle it in the same way until a need is shown.
2400 * It is important to search the prio_tree before nonlinear list: a vma
2401 * may become nonlinear and be shifted from prio_tree to nonlinear list
2402 * while the lock is dropped; but never shifted from list to prio_tree.
2403 *
2404 * In order to make forward progress despite restarting the search,
2405 * vm_truncate_count is used to mark a vma as now dealt with, so we can
2406 * quickly skip it next time around. Since the prio_tree search only
2407 * shows us those vmas affected by unmapping the range in question, we
2408 * can't efficiently keep all vmas in step with mapping->truncate_count:
2409 * so instead reset them all whenever it wraps back to 0 (then go to 1).
2410 * mapping->truncate_count and vma->vm_truncate_count are protected by
2411 * i_mmap_lock.
2412 *
2413 * In order to make forward progress despite repeatedly restarting some
2414 * large vma, note the restart_addr from unmap_vmas when it breaks out:
2415 * and restart from that address when we reach that vma again. It might
2416 * have been split or merged, shrunk or extended, but never shifted: so
2417 * restart_addr remains valid so long as it remains in the vma's range.
2418 * unmap_mapping_range forces truncate_count to leap over page-aligned
2419 * values so we can save vma's restart_addr in its truncate_count field.
2420 */
2421#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
2422
2423static void reset_vma_truncate_counts(struct address_space *mapping)
2424{
2425    struct vm_area_struct *vma;
2426    struct prio_tree_iter iter;
2427
2428    vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
2429        vma->vm_truncate_count = 0;
2430    list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
2431        vma->vm_truncate_count = 0;
2432}
2433
2434static int unmap_mapping_range_vma(struct vm_area_struct *vma,
2435        unsigned long start_addr, unsigned long end_addr,
2436        struct zap_details *details)
2437{
2438    unsigned long restart_addr;
2439    int need_break;
2440
2441    /*
2442     * files that support invalidating or truncating portions of the
2443     * file from under mmaped areas must have their ->fault function
2444     * return a locked page (and set VM_FAULT_LOCKED in the return).
2445     * This provides synchronisation against concurrent unmapping here.
2446     */
2447
2448again:
2449    restart_addr = vma->vm_truncate_count;
2450    if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
2451        start_addr = restart_addr;
2452        if (start_addr >= end_addr) {
2453            /* Top of vma has been split off since last time */
2454            vma->vm_truncate_count = details->truncate_count;
2455            return 0;
2456        }
2457    }
2458
2459    restart_addr = zap_page_range(vma, start_addr,
2460                    end_addr - start_addr, details);
2461    need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
2462
2463    if (restart_addr >= end_addr) {
2464        /* We have now completed this vma: mark it so */
2465        vma->vm_truncate_count = details->truncate_count;
2466        if (!need_break)
2467            return 0;
2468    } else {
2469        /* Note restart_addr in vma's truncate_count field */
2470        vma->vm_truncate_count = restart_addr;
2471        if (!need_break)
2472            goto again;
2473    }
2474
2475    spin_unlock(details->i_mmap_lock);
2476    cond_resched();
2477    spin_lock(details->i_mmap_lock);
2478    return -EINTR;
2479}
2480
2481static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
2482                        struct zap_details *details)
2483{
2484    struct vm_area_struct *vma;
2485    struct prio_tree_iter iter;
2486    pgoff_t vba, vea, zba, zea;
2487
2488restart:
2489    vma_prio_tree_foreach(vma, &iter, root,
2490            details->first_index, details->last_index) {
2491        /* Skip quickly over those we have already dealt with */
2492        if (vma->vm_truncate_count == details->truncate_count)
2493            continue;
2494
2495        vba = vma->vm_pgoff;
2496        vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
2497        /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
2498        zba = details->first_index;
2499        if (zba < vba)
2500            zba = vba;
2501        zea = details->last_index;
2502        if (zea > vea)
2503            zea = vea;
2504
2505        if (unmap_mapping_range_vma(vma,
2506            ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2507            ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2508                details) < 0)
2509            goto restart;
2510    }
2511}
2512
2513static inline void unmap_mapping_range_list(struct list_head *head,
2514                        struct zap_details *details)
2515{
2516    struct vm_area_struct *vma;
2517
2518    /*
2519     * In nonlinear VMAs there is no correspondence between virtual address
2520     * offset and file offset. So we must perform an exhaustive search
2521     * across *all* the pages in each nonlinear VMA, not just the pages
2522     * whose virtual address lies outside the file truncation point.
2523     */
2524restart:
2525    list_for_each_entry(vma, head, shared.vm_set.list) {
2526        /* Skip quickly over those we have already dealt with */
2527        if (vma->vm_truncate_count == details->truncate_count)
2528            continue;
2529        details->nonlinear_vma = vma;
2530        if (unmap_mapping_range_vma(vma, vma->vm_start,
2531                    vma->vm_end, details) < 0)
2532            goto restart;
2533    }
2534}
2535
2536/**
2537 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
2538 * @mapping: the address space containing mmaps to be unmapped.
2539 * @holebegin: byte in first page to unmap, relative to the start of
2540 * the underlying file. This will be rounded down to a PAGE_SIZE
2541 * boundary. Note that this is different from truncate_pagecache(), which
2542 * must keep the partial page. In contrast, we must get rid of
2543 * partial pages.
2544 * @holelen: size of prospective hole in bytes. This will be rounded
2545 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
2546 * end of the file.
2547 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
2548 * but 0 when invalidating pagecache, don't throw away private data.
2549 */
2550void unmap_mapping_range(struct address_space *mapping,
2551        loff_t const holebegin, loff_t const holelen, int even_cows)
2552{
2553    struct zap_details details;
2554    pgoff_t hba = holebegin >> PAGE_SHIFT;
2555    pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2556
2557    /* Check for overflow. */
2558    if (sizeof(holelen) > sizeof(hlen)) {
2559        long long holeend =
2560            (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2561        if (holeend & ~(long long)ULONG_MAX)
2562            hlen = ULONG_MAX - hba + 1;
2563    }
2564
2565    details.check_mapping = even_cows? NULL: mapping;
2566    details.nonlinear_vma = NULL;
2567    details.first_index = hba;
2568    details.last_index = hba + hlen - 1;
2569    if (details.last_index < details.first_index)
2570        details.last_index = ULONG_MAX;
2571    details.i_mmap_lock = &mapping->i_mmap_lock;
2572
2573    spin_lock(&mapping->i_mmap_lock);
2574
2575    /* Protect against endless unmapping loops */
2576    mapping->truncate_count++;
2577    if (unlikely(is_restart_addr(mapping->truncate_count))) {
2578        if (mapping->truncate_count == 0)
2579            reset_vma_truncate_counts(mapping);
2580        mapping->truncate_count++;
2581    }
2582    details.truncate_count = mapping->truncate_count;
2583
2584    if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
2585        unmap_mapping_range_tree(&mapping->i_mmap, &details);
2586    if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2587        unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2588    spin_unlock(&mapping->i_mmap_lock);
2589}
2590EXPORT_SYMBOL(unmap_mapping_range);
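
/*
 * Example (illustrative sketch): shooting down all user mappings of
 * bytes [pos, pos + len) of a file before invalidating its pagecache,
 * as a network filesystem might after a server-side change; even_cows
 * == 0 leaves private COWed copies alone. The function name is
 * hypothetical.
 */
static int example_invalidate_file_range(struct address_space *mapping,
					 loff_t pos, loff_t len)
{
	unmap_mapping_range(mapping, pos, len, 0);
	return invalidate_inode_pages2_range(mapping,
				pos >> PAGE_CACHE_SHIFT,
				(pos + len - 1) >> PAGE_CACHE_SHIFT);
}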
2591
2592int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
2593{
2594    struct address_space *mapping = inode->i_mapping;
2595
2596    /*
2597     * If the underlying filesystem is not going to provide
2598     * a way to truncate a range of blocks (punch a hole) -
2599     * we should return failure right now.
2600     */
2601    if (!inode->i_op->truncate_range)
2602        return -ENOSYS;
2603
2604    mutex_lock(&inode->i_mutex);
2605    down_write(&inode->i_alloc_sem);
2606    unmap_mapping_range(mapping, offset, (end - offset), 1);
2607    truncate_inode_pages_range(mapping, offset, end);
2608    unmap_mapping_range(mapping, offset, (end - offset), 1);
2609    inode->i_op->truncate_range(inode, offset, end);
2610    up_write(&inode->i_alloc_sem);
2611    mutex_unlock(&inode->i_mutex);
2612
2613    return 0;
2614}
2615
2616/*
2617 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2618 * but allow concurrent faults), and pte mapped but not yet locked.
2619 * We return with mmap_sem still held, but pte unmapped and unlocked.
2620 */
2621static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2622        unsigned long address, pte_t *page_table, pmd_t *pmd,
2623        unsigned int flags, pte_t orig_pte)
2624{
2625    spinlock_t *ptl;
2626    struct page *page, *swapcache = NULL;
2627    swp_entry_t entry;
2628    pte_t pte;
2629    struct mem_cgroup *ptr = NULL;
2630    int exclusive = 0;
2631    int ret = 0;
2632
2633    if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2634        goto out;
2635
2636    entry = pte_to_swp_entry(orig_pte);
2637    if (unlikely(non_swap_entry(entry))) {
2638        if (is_migration_entry(entry)) {
2639            migration_entry_wait(mm, pmd, address);
2640        } else if (is_hwpoison_entry(entry)) {
2641            ret = VM_FAULT_HWPOISON;
2642        } else {
2643            print_bad_pte(vma, address, orig_pte, NULL);
2644            ret = VM_FAULT_SIGBUS;
2645        }
2646        goto out;
2647    }
2648    delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2649    page = lookup_swap_cache(entry);
2650    if (!page) {
2651        grab_swap_token(mm); /* Contend for token _before_ read-in */
2652        page = swapin_readahead(entry,
2653                    GFP_HIGHUSER_MOVABLE, vma, address);
2654        if (!page) {
2655            /*
2656             * Back out if somebody else faulted in this pte
2657             * while we released the pte lock.
2658             */
2659            page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2660            if (likely(pte_same(*page_table, orig_pte)))
2661                ret = VM_FAULT_OOM;
2662            delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2663            goto unlock;
2664        }
2665
2666        /* Had to read the page from swap area: Major fault */
2667        ret = VM_FAULT_MAJOR;
2668        count_vm_event(PGMAJFAULT);
2669    } else if (PageHWPoison(page)) {
2670        /*
2671         * hwpoisoned dirty swapcache pages are kept for killing
2672         * owner processes (which may be unknown at hwpoison time)
2673         */
2674        ret = VM_FAULT_HWPOISON;
2675        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2676        goto out_release;
2677    }
2678
2679    lock_page(page);
2680    delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2681
2682    /*
2683     * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2684     * release the swapcache from under us. The page pin, and pte_same
2685     * test below, are not enough to exclude that. Even if it is still
2686     * swapcache, we need to check that the page's swap has not changed.
2687     */
2688    if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2689        goto out_page;
2690
2691    if (ksm_might_need_to_copy(page, vma, address)) {
2692        swapcache = page;
2693        page = ksm_does_need_to_copy(page, vma, address);
2694
2695        if (unlikely(!page)) {
2696            ret = VM_FAULT_OOM;
2697            page = swapcache;
2698            swapcache = NULL;
2699            goto out_page;
2700        }
2701    }
2702
2703    if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2704        ret = VM_FAULT_OOM;
2705        goto out_page;
2706    }
2707
2708    /*
2709     * Back out if somebody else already faulted in this pte.
2710     */
2711    page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2712    if (unlikely(!pte_same(*page_table, orig_pte)))
2713        goto out_nomap;
2714
2715    if (unlikely(!PageUptodate(page))) {
2716        ret = VM_FAULT_SIGBUS;
2717        goto out_nomap;
2718    }
2719
2720    /*
2721     * The page isn't present yet, go ahead with the fault.
2722     *
2723     * Be careful about the sequence of operations here.
2724     * To get its accounting right, reuse_swap_page() must be called
2725     * while the page is counted on swap but not yet in mapcount i.e.
2726     * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2727     * must be called after the swap_free(), or it will never succeed.
2728 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
2729     * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
2730     * in page->private. In this case, a record in swap_cgroup is silently
2731     * discarded at swap_free().
2732     */
2733
2734    inc_mm_counter_fast(mm, MM_ANONPAGES);
2735    dec_mm_counter_fast(mm, MM_SWAPENTS);
2736    pte = mk_pte(page, vma->vm_page_prot);
2737    if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
2738        pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2739        flags &= ~FAULT_FLAG_WRITE;
2740        ret |= VM_FAULT_WRITE;
2741        exclusive = 1;
2742    }
2743    flush_icache_page(vma, page);
2744    set_pte_at(mm, address, page_table, pte);
2745    do_page_add_anon_rmap(page, vma, address, exclusive);
2746    /* It's better to call commit-charge after rmap is established */
2747    mem_cgroup_commit_charge_swapin(page, ptr);
2748
2749    swap_free(entry);
2750    if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2751        try_to_free_swap(page);
2752    unlock_page(page);
2753    if (swapcache) {
2754        /*
2755 * Hold the lock to avoid the swap entry being reused
2756         * until we take the PT lock for the pte_same() check
2757         * (to avoid false positives from pte_same). For
2758         * further safety release the lock after the swap_free
2759         * so that the swap count won't change under a
2760         * parallel locked swapcache.
2761         */
2762        unlock_page(swapcache);
2763        page_cache_release(swapcache);
2764    }
2765
2766    if (flags & FAULT_FLAG_WRITE) {
2767        ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
2768        if (ret & VM_FAULT_ERROR)
2769            ret &= VM_FAULT_ERROR;
2770        goto out;
2771    }
2772
2773    /* No need to invalidate - it was non-present before */
2774    update_mmu_cache(vma, address, page_table);
2775unlock:
2776    pte_unmap_unlock(page_table, ptl);
2777out:
2778    return ret;
2779out_nomap:
2780    mem_cgroup_cancel_charge_swapin(ptr);
2781    pte_unmap_unlock(page_table, ptl);
2782out_page:
2783    unlock_page(page);
2784out_release:
2785    page_cache_release(page);
2786    if (swapcache) {
2787        unlock_page(swapcache);
2788        page_cache_release(swapcache);
2789    }
2790    return ret;
2791}
2792
2793/*
2794 * This is like a special single-page "expand_{down|up}wards()",
2795 * except we must first make sure that 'address{-|+}PAGE_SIZE'
2796 * doesn't hit another vma.
2797 */
2798static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
2799{
2800    address &= PAGE_MASK;
2801    if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
2802        struct vm_area_struct *prev = vma->vm_prev;
2803
2804        /*
2805         * Is there a mapping abutting this one below?
2806         *
2807         * That's only ok if it's the same stack mapping
2808         * that has gotten split..
2809         */
2810        if (prev && prev->vm_end == address)
2811            return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
2812
2813        expand_stack(vma, address - PAGE_SIZE);
2814    }
2815    if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
2816        struct vm_area_struct *next = vma->vm_next;
2817
2818        /* As VM_GROWSDOWN but s/below/above/ */
2819        if (next && next->vm_start == address + PAGE_SIZE)
2820            return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
2821
2822        expand_upwards(vma, address + PAGE_SIZE);
2823    }
2824    return 0;
2825}
2826
2827/*
2828 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2829 * but allow concurrent faults), and pte mapped but not yet locked.
2830 * We return with mmap_sem still held, but pte unmapped and unlocked.
2831 */
2832static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2833        unsigned long address, pte_t *page_table, pmd_t *pmd,
2834        unsigned int flags)
2835{
2836    struct page *page;
2837    spinlock_t *ptl;
2838    pte_t entry;
2839
2840    pte_unmap(page_table);
2841
2842    /* Check if we need to add a guard page to the stack */
2843    if (check_stack_guard_page(vma, address) < 0)
2844        return VM_FAULT_SIGBUS;
2845
2846    /* Use the zero-page for reads */
2847    if (!(flags & FAULT_FLAG_WRITE)) {
2848        entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
2849                        vma->vm_page_prot));
2850        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2851        if (!pte_none(*page_table))
2852            goto unlock;
2853        goto setpte;
2854    }
2855
2856    /* Allocate our own private page. */
2857    if (unlikely(anon_vma_prepare(vma)))
2858        goto oom;
2859    page = alloc_zeroed_user_highpage_movable(vma, address);
2860    if (!page)
2861        goto oom;
2862    __SetPageUptodate(page);
2863
2864    if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
2865        goto oom_free_page;
2866
2867    entry = mk_pte(page, vma->vm_page_prot);
2868    if (vma->vm_flags & VM_WRITE)
2869        entry = pte_mkwrite(pte_mkdirty(entry));
2870
2871    page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2872    if (!pte_none(*page_table))
2873        goto release;
2874
2875    inc_mm_counter_fast(mm, MM_ANONPAGES);
2876    page_add_new_anon_rmap(page, vma, address);
2877setpte:
2878    set_pte_at(mm, address, page_table, entry);
2879
2880    /* No need to invalidate - it was non-present before */
2881    update_mmu_cache(vma, address, page_table);
2882unlock:
2883    pte_unmap_unlock(page_table, ptl);
2884    return 0;
2885release:
2886    mem_cgroup_uncharge_page(page);
2887    page_cache_release(page);
2888    goto unlock;
2889oom_free_page:
2890    page_cache_release(page);
2891oom:
2892    return VM_FAULT_OOM;
2893}
2894
2895/*
2896 * __do_fault() tries to create a new page mapping. It aggressively
2897 * tries to share with existing pages, but makes a separate copy if
2898 * FAULT_FLAG_WRITE is set in the flags parameter, in order to avoid
2899 * the next page fault.
2900 *
2901 * As this is called only for pages that do not currently exist, we
2902 * do not need to flush old virtual caches or the TLB.
2903 *
2904 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2905 * but allow concurrent faults), and pte neither mapped nor locked.
2906 * We return with mmap_sem still held, but pte unmapped and unlocked.
2907 */
2908static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2909        unsigned long address, pmd_t *pmd,
2910        pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
2911{
2912    pte_t *page_table;
2913    spinlock_t *ptl;
2914    struct page *page;
2915    pte_t entry;
2916    int anon = 0;
2917    int charged = 0;
2918    struct page *dirty_page = NULL;
2919    struct vm_fault vmf;
2920    int ret;
2921    int page_mkwrite = 0;
2922
2923    vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2924    vmf.pgoff = pgoff;
2925    vmf.flags = flags;
2926    vmf.page = NULL;
2927
2928    ret = vma->vm_ops->fault(vma, &vmf);
2929    if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2930        return ret;
2931
2932    if (unlikely(PageHWPoison(vmf.page))) {
2933        if (ret & VM_FAULT_LOCKED)
2934            unlock_page(vmf.page);
2935        return VM_FAULT_HWPOISON;
2936    }
2937
2938    /*
2939     * For consistency in subsequent calls, make the faulted page always
2940     * locked.
2941     */
2942    if (unlikely(!(ret & VM_FAULT_LOCKED)))
2943        lock_page(vmf.page);
2944    else
2945        VM_BUG_ON(!PageLocked(vmf.page));
2946
2947    /*
2948     * Should we do an early C-O-W break?
2949     */
2950    page = vmf.page;
2951    if (flags & FAULT_FLAG_WRITE) {
2952        if (!(vma->vm_flags & VM_SHARED)) {
2953            anon = 1;
2954            if (unlikely(anon_vma_prepare(vma))) {
2955                ret = VM_FAULT_OOM;
2956                goto out;
2957            }
2958            page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
2959                        vma, address);
2960            if (!page) {
2961                ret = VM_FAULT_OOM;
2962                goto out;
2963            }
2964            if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
2965                ret = VM_FAULT_OOM;
2966                page_cache_release(page);
2967                goto out;
2968            }
2969            charged = 1;
2970            /*
2971             * Don't let another task, with possibly unlocked vma,
2972             * keep the mlocked page.
2973             */
2974            if (vma->vm_flags & VM_LOCKED)
2975                clear_page_mlock(vmf.page);
2976            copy_user_highpage(page, vmf.page, address, vma);
2977            __SetPageUptodate(page);
2978        } else {
2979            /*
2980             * If the page will be shareable, see if the backing
2981             * address space wants to know that the page is about
2982             * to become writable
2983             */
2984            if (vma->vm_ops->page_mkwrite) {
2985                int tmp;
2986
2987                unlock_page(page);
2988                vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2989                tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2990                if (unlikely(tmp &
2991                      (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2992                    ret = tmp;
2993                    goto unwritable_page;
2994                }
2995                if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
2996                    lock_page(page);
2997                    if (!page->mapping) {
2998                        ret = 0; /* retry the fault */
2999                        unlock_page(page);
3000                        goto unwritable_page;
3001                    }
3002                } else
3003                    VM_BUG_ON(!PageLocked(page));
3004                page_mkwrite = 1;
3005            }
3006        }
3007
3008    }
3009
3010    page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
3011
3012    /*
3013     * This silly early PAGE_DIRTY setting removes a race
3014     * due to the bad i386 page protection. But it's valid
3015     * for other architectures too.
3016     *
3017     * Note that if FAULT_FLAG_WRITE is set, we either now have
3018     * an exclusive copy of the page, or this is a shared mapping,
3019     * so we can make it writable and dirty to avoid having to
3020     * handle that later.
3021     */
3022    /* Only go through if we didn't race with anybody else... */
3023    if (likely(pte_same(*page_table, orig_pte))) {
3024        flush_icache_page(vma, page);
3025        entry = mk_pte(page, vma->vm_page_prot);
3026        if (flags & FAULT_FLAG_WRITE)
3027            entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3028        if (anon) {
3029            inc_mm_counter_fast(mm, MM_ANONPAGES);
3030            page_add_new_anon_rmap(page, vma, address);
3031        } else {
3032            inc_mm_counter_fast(mm, MM_FILEPAGES);
3033            page_add_file_rmap(page);
3034            if (flags & FAULT_FLAG_WRITE) {
3035                dirty_page = page;
3036                get_page(dirty_page);
3037            }
3038        }
3039        set_pte_at(mm, address, page_table, entry);
3040
3041        /* no need to invalidate: a not-present page won't be cached */
3042        update_mmu_cache(vma, address, page_table);
3043    } else {
3044        if (charged)
3045            mem_cgroup_uncharge_page(page);
3046        if (anon)
3047            page_cache_release(page);
3048        else
3049            anon = 1; /* no anon but release faulted_page */
3050    }
3051
3052    pte_unmap_unlock(page_table, ptl);
3053
3054out:
3055    if (dirty_page) {
3056        struct address_space *mapping = page->mapping;
3057
3058        if (set_page_dirty(dirty_page))
3059            page_mkwrite = 1;
3060        unlock_page(dirty_page);
3061        put_page(dirty_page);
3062        if (page_mkwrite && mapping) {
3063            /*
3064             * Some device drivers do not set page.mapping but still
3065             * dirty their pages
3066             */
3067            balance_dirty_pages_ratelimited(mapping);
3068        }
3069
3070        /* file_update_time outside page_lock */
3071        if (vma->vm_file)
3072            file_update_time(vma->vm_file);
3073    } else {
3074        unlock_page(vmf.page);
3075        if (anon)
3076            page_cache_release(vmf.page);
3077    }
3078
3079    return ret;
3080
3081unwritable_page:
3082    page_cache_release(page);
3083    return ret;
3084}
3085
3086static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3087        unsigned long address, pte_t *page_table, pmd_t *pmd,
3088        unsigned int flags, pte_t orig_pte)
3089{
3090    pgoff_t pgoff = (((address & PAGE_MASK)
3091            - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
3092
3093    pte_unmap(page_table);
3094    return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
3095}
3096
3097/*
3098 * Fault of a previously existing named mapping. Repopulate the pte
3099 * from the encoded file_pte if possible. This enables swappable
3100 * nonlinear vmas.
3101 *
3102 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3103 * but allow concurrent faults), and pte mapped but not yet locked.
3104 * We return with mmap_sem still held, but pte unmapped and unlocked.
3105 */
3106static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3107        unsigned long address, pte_t *page_table, pmd_t *pmd,
3108        unsigned int flags, pte_t orig_pte)
3109{
3110    pgoff_t pgoff;
3111
3112    flags |= FAULT_FLAG_NONLINEAR;
3113
3114    if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
3115        return 0;
3116
3117    if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
3118        /*
3119         * Page table corrupted: show pte and kill process.
3120         */
3121        print_bad_pte(vma, address, orig_pte, NULL);
3122        return VM_FAULT_SIGBUS;
3123    }
3124
3125    pgoff = pte_to_pgoff(orig_pte);
3126    return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
3127}
3128
3129/*
3130 * These routines also need to handle stuff like marking pages dirty
3131 * and/or accessed for architectures that don't do it in hardware (most
3132 * RISC architectures). The early dirtying is also good on the i386.
3133 *
3134 * There is also a hook called "update_mmu_cache()" that architectures
3135 * with external mmu caches can use to update those (ie the Sparc or
3136 * PowerPC hashed page tables that act as extended TLBs).
3137 *
3138 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3139 * but allow concurrent faults), and pte mapped but not yet locked.
3140 * We return with mmap_sem still held, but pte unmapped and unlocked.
3141 */
3142static inline int handle_pte_fault(struct mm_struct *mm,
3143        struct vm_area_struct *vma, unsigned long address,
3144        pte_t *pte, pmd_t *pmd, unsigned int flags)
3145{
3146    pte_t entry;
3147    spinlock_t *ptl;
3148
3149    entry = *pte;
3150    if (!pte_present(entry)) {
3151        if (pte_none(entry)) {
3152            if (vma->vm_ops) {
3153                if (likely(vma->vm_ops->fault))
3154                    return do_linear_fault(mm, vma, address,
3155                        pte, pmd, flags, entry);
3156            }
3157            return do_anonymous_page(mm, vma, address,
3158                         pte, pmd, flags);
3159        }
3160        if (pte_file(entry))
3161            return do_nonlinear_fault(mm, vma, address,
3162                    pte, pmd, flags, entry);
3163        return do_swap_page(mm, vma, address,
3164                    pte, pmd, flags, entry);
3165    }
3166
3167    ptl = pte_lockptr(mm, pmd);
3168    spin_lock(ptl);
3169    if (unlikely(!pte_same(*pte, entry)))
3170        goto unlock;
3171    if (flags & FAULT_FLAG_WRITE) {
3172        if (!pte_write(entry))
3173            return do_wp_page(mm, vma, address,
3174                    pte, pmd, ptl, entry);
3175        entry = pte_mkdirty(entry);
3176    }
3177    entry = pte_mkyoung(entry);
3178    if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
3179        update_mmu_cache(vma, address, pte);
3180    } else {
3181        /*
3182         * This is needed only for protection faults but the arch code
3183         * is not yet telling us if this is a protection fault or not.
3184         * This still avoids useless tlb flushes for .text page faults
3185         * with threads.
3186         */
3187        if (flags & FAULT_FLAG_WRITE)
3188            flush_tlb_page(vma, address);
3189    }
3190unlock:
3191    pte_unmap_unlock(pte, ptl);
3192    return 0;
3193}
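
/*
 * Summary of the dispatch above, for orientation:
 *
 *	pte state                         handler
 *	--------------------------------  ----------------------------
 *	none, vma has ->fault             do_linear_fault()
 *	none, anonymous vma               do_anonymous_page()
 *	not present, file pte             do_nonlinear_fault()
 *	not present, swap entry           do_swap_page()
 *	present, write to read-only pte   do_wp_page()
 *	present, otherwise                young/dirty bookkeeping only
 */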
3194
3195/*
3196 * By the time we get here, we already hold the mm semaphore
3197 */
3198int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3199        unsigned long address, unsigned int flags)
3200{
3201    pgd_t *pgd;
3202    pud_t *pud;
3203    pmd_t *pmd;
3204    pte_t *pte;
3205
3206    __set_current_state(TASK_RUNNING);
3207
3208    count_vm_event(PGFAULT);
3209
3210    /* do counter updates before entering the really critical section. */
3211    check_sync_rss_stat(current);
3212
3213    if (unlikely(is_vm_hugetlb_page(vma)))
3214        return hugetlb_fault(mm, vma, address, flags);
3215
3216    pgd = pgd_offset(mm, address);
3217    pud = pud_alloc(mm, pgd, address);
3218    if (!pud)
3219        return VM_FAULT_OOM;
3220    pmd = pmd_alloc(mm, pud, address);
3221    if (!pmd)
3222        return VM_FAULT_OOM;
3223    pte = pte_alloc_map(mm, pmd, address);
3224    if (!pte)
3225        return VM_FAULT_OOM;
3226
3227    return handle_pte_fault(mm, vma, address, pte, pmd, flags);
3228}
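
/*
 * Typical caller, loosely modelled on an architecture's do_page_fault()
 * (sketch only; details differ per arch):
 *
 *	fault = handle_mm_fault(mm, vma, address,
 *				write ? FAULT_FLAG_WRITE : 0);
 *	if (unlikely(fault & VM_FAULT_ERROR)) {
 *		if (fault & VM_FAULT_OOM)
 *			goto out_of_memory;
 *		else if (fault & VM_FAULT_SIGBUS)
 *			goto do_sigbus;
 *		BUG();
 *	}
 *	if (fault & VM_FAULT_MAJOR)
 *		tsk->maj_flt++;
 *	else
 *		tsk->min_flt++;
 *
 * mmap_sem is held for read across the call, as required above.
 */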
3229
3230#ifndef __PAGETABLE_PUD_FOLDED
3231/*
3232 * Allocate page upper directory.
3233 * We've already handled the fast-path in-line.
3234 */
3235int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
3236{
3237    pud_t *new = pud_alloc_one(mm, address);
3238    if (!new)
3239        return -ENOMEM;
3240
3241    smp_wmb(); /* See comment in __pte_alloc */
3242
3243    spin_lock(&mm->page_table_lock);
3244    if (pgd_present(*pgd)) /* Another has populated it */
3245        pud_free(mm, new);
3246    else
3247        pgd_populate(mm, pgd, new);
3248    spin_unlock(&mm->page_table_lock);
3249    return 0;
3250}
3251#endif /* __PAGETABLE_PUD_FOLDED */
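
/*
 * For reference, the in-line fast path mentioned above (pud_alloc() in
 * include/linux/mm.h) is roughly:
 *
 *	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address)) ?
 *		NULL : pud_offset(pgd, address);
 *
 * so __pud_alloc() is entered only when the pgd entry is still empty, and
 * the lock above resolves the race when two threads get there at once.
 */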
3252
3253#ifndef __PAGETABLE_PMD_FOLDED
3254/*
3255 * Allocate page middle directory.
3256 * We've already handled the fast-path in-line.
3257 */
3258int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3259{
3260    pmd_t *new = pmd_alloc_one(mm, address);
3261    if (!new)
3262        return -ENOMEM;
3263
3264    smp_wmb(); /* See comment in __pte_alloc */
3265
3266    spin_lock(&mm->page_table_lock);
3267#ifndef __ARCH_HAS_4LEVEL_HACK
3268    if (pud_present(*pud)) /* Another has populated it */
3269        pmd_free(mm, new);
3270    else
3271        pud_populate(mm, pud, new);
3272#else
3273    if (pgd_present(*pud)) /* Another has populated it */
3274        pmd_free(mm, new);
3275    else
3276        pgd_populate(mm, pud, new);
3277#endif /* __ARCH_HAS_4LEVEL_HACK */
3278    spin_unlock(&mm->page_table_lock);
3279    return 0;
3280}
3281#endif /* __PAGETABLE_PMD_FOLDED */
3282
3283int make_pages_present(unsigned long addr, unsigned long end)
3284{
3285    int ret, len, write;
3286    struct vm_area_struct * vma;
3287
3288    vma = find_vma(current->mm, addr);
3289    if (!vma)
3290        return -ENOMEM;
3291    write = (vma->vm_flags & VM_WRITE) != 0;
3292    BUG_ON(addr >= end);
3293    BUG_ON(end > vma->vm_end);
3294    len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
3295    ret = get_user_pages(current, current->mm, addr,
3296            len, write, 0, NULL, NULL);
3297    if (ret < 0)
3298        return ret;
3299    return ret == len ? 0 : -EFAULT;
3300}
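
/*
 * Worked example (assuming 4K pages): for addr == 0x1800 and end == 0x3800,
 * len = DIV_ROUND_UP(0x3800, 0x1000) - 0x1800/0x1000 = 4 - 1 = 3, so the
 * three pages at 0x1000, 0x2000 and 0x3000 are faulted in, covering all of
 * [addr, end).
 */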
3301
3302#if !defined(__HAVE_ARCH_GATE_AREA)
3303
3304#if defined(AT_SYSINFO_EHDR)
3305static struct vm_area_struct gate_vma;
3306
3307static int __init gate_vma_init(void)
3308{
3309    gate_vma.vm_mm = NULL;
3310    gate_vma.vm_start = FIXADDR_USER_START;
3311    gate_vma.vm_end = FIXADDR_USER_END;
3312    gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
3313    gate_vma.vm_page_prot = __P101;
3314    /*
3315     * Make sure the vDSO gets into every core dump.
3316     * Dumping its contents makes the post-mortem fully interpretable
3317     * later, without having to match up the same kernel and hardware
3318     * config to see what the PC values meant.
3319     */
3320    gate_vma.vm_flags |= VM_ALWAYSDUMP;
3321    return 0;
3322}
3323__initcall(gate_vma_init);
3324#endif
3325
3326struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
3327{
3328#ifdef AT_SYSINFO_EHDR
3329    return &gate_vma;
3330#else
3331    return NULL;
3332#endif
3333}
3334
3335int in_gate_area_no_task(unsigned long addr)
3336{
3337#ifdef AT_SYSINFO_EHDR
3338    if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
3339        return 1;
3340#endif
3341    return 0;
3342}
3343
3344#endif /* __HAVE_ARCH_GATE_AREA */
3345
3346static int follow_pte(struct mm_struct *mm, unsigned long address,
3347        pte_t **ptepp, spinlock_t **ptlp)
3348{
3349    pgd_t *pgd;
3350    pud_t *pud;
3351    pmd_t *pmd;
3352    pte_t *ptep;
3353
3354    pgd = pgd_offset(mm, address);
3355    if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
3356        goto out;
3357
3358    pud = pud_offset(pgd, address);
3359    if (pud_none(*pud) || unlikely(pud_bad(*pud)))
3360        goto out;
3361
3362    pmd = pmd_offset(pud, address);
3363    if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3364        goto out;
3365
3366    /* We cannot handle huge page PFN maps. Luckily they don't exist. */
3367    if (pmd_huge(*pmd))
3368        goto out;
3369
3370    ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3371    if (!ptep)
3372        goto out;
3373    if (!pte_present(*ptep))
3374        goto unlock;
3375    *ptepp = ptep;
3376    return 0;
3377unlock:
3378    pte_unmap_unlock(ptep, *ptlp);
3379out:
3380    return -EINVAL;
3381}
3382
3383/**
3384 * follow_pfn - look up PFN at a user virtual address
3385 * @vma: memory mapping
3386 * @address: user virtual address
3387 * @pfn: location to store found PFN
3388 *
3389 * Only IO mappings and raw PFN mappings are allowed.
3390 *
3391 * Returns zero and stores the PFN at @pfn on success, a negative errno otherwise.
3392 */
3393int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3394    unsigned long *pfn)
3395{
3396    int ret = -EINVAL;
3397    spinlock_t *ptl;
3398    pte_t *ptep;
3399
3400    if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3401        return ret;
3402
3403    ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3404    if (ret)
3405        return ret;
3406    *pfn = pte_pfn(*ptep);
3407    pte_unmap_unlock(ptep, ptl);
3408    return 0;
3409}
3410EXPORT_SYMBOL(follow_pfn);
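
/*
 * Example use (sketch; "uaddr" and the surrounding driver are hypothetical):
 * with mmap_sem held for read, a driver can resolve the frame behind a
 * userspace VM_PFNMAP address like this:
 *
 *	unsigned long pfn;
 *	resource_size_t phys;
 *	struct vm_area_struct *vma = find_vma(current->mm, uaddr);
 *
 *	if (vma && vma->vm_start <= uaddr && !follow_pfn(vma, uaddr, &pfn))
 *		phys = ((resource_size_t)pfn << PAGE_SHIFT) +
 *			(uaddr & ~PAGE_MASK);
 *
 * The pte lock is dropped again before returning, so the pfn is only a
 * snapshot of the mapping at that moment.
 */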
3411
3412#ifdef CONFIG_HAVE_IOREMAP_PROT
3413int follow_phys(struct vm_area_struct *vma,
3414        unsigned long address, unsigned int flags,
3415        unsigned long *prot, resource_size_t *phys)
3416{
3417    int ret = -EINVAL;
3418    pte_t *ptep, pte;
3419    spinlock_t *ptl;
3420
3421    if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3422        goto out;
3423
3424    if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
3425        goto out;
3426    pte = *ptep;
3427
3428    if ((flags & FOLL_WRITE) && !pte_write(pte))
3429        goto unlock;
3430
3431    *prot = pgprot_val(pte_pgprot(pte));
3432    *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
3433
3434    ret = 0;
3435unlock:
3436    pte_unmap_unlock(ptep, ptl);
3437out:
3438    return ret;
3439}
3440
3441int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3442            void *buf, int len, int write)
3443{
3444    resource_size_t phys_addr;
3445    unsigned long prot = 0;
3446    void __iomem *maddr;
3447    int offset = addr & (PAGE_SIZE-1);
3448
3449    if (follow_phys(vma, addr, write, &prot, &phys_addr))
3450        return -EINVAL;
3451
3452    maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
3453    if (write)
3454        memcpy_toio(maddr + offset, buf, len);
3455    else
3456        memcpy_fromio(buf, maddr + offset, len);
3457    iounmap(maddr);
3458
3459    return len;
3460}
3461#endif
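
/*
 * generic_access_phys() is meant to be hooked up as the ->access method of
 * a VM_IO/VM_PFNMAP mapping so that access_process_vm() below (and hence
 * ptrace and gdb) can reach it.  Sketch (the ops name is made up):
 *
 *	static const struct vm_operations_struct phys_mem_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 *	...
 *	vma->vm_ops = &phys_mem_vm_ops;
 *
 * drivers/char/mem.c wires it up this way for /dev/mem mappings when
 * CONFIG_HAVE_IOREMAP_PROT is set.
 */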
3462
3463/*
3464 * Access another process' address space.
3465 * The source/target buffer must be in kernel space.
3466 * Do not walk the page tables directly; use get_user_pages() instead.
3467 */
3468int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
3469{
3470    struct mm_struct *mm;
3471    struct vm_area_struct *vma;
3472    void *old_buf = buf;
3473
3474    mm = get_task_mm(tsk);
3475    if (!mm)
3476        return 0;
3477
3478    down_read(&mm->mmap_sem);
3479    /* ignore errors, just check how much was successfully transferred */
3480    while (len) {
3481        int bytes, ret, offset;
3482        void *maddr;
3483        struct page *page = NULL;
3484
3485        ret = get_user_pages(tsk, mm, addr, 1,
3486                write, 1, &page, &vma);
3487        if (ret <= 0) {
3488            /*
3489             * Check if this is a VM_IO | VM_PFNMAP VMA, which
3490             * we can access using slightly different code.
3491             */
3492#ifdef CONFIG_HAVE_IOREMAP_PROT
3493            vma = find_vma(mm, addr);
3494            if (!vma)
3495                break;
3496            if (vma->vm_ops && vma->vm_ops->access)
3497                ret = vma->vm_ops->access(vma, addr, buf,
3498                              len, write);
3499            if (ret <= 0)
3500#endif
3501                break;
3502            bytes = ret;
3503        } else {
3504            bytes = len;
3505            offset = addr & (PAGE_SIZE-1);
3506            if (bytes > PAGE_SIZE-offset)
3507                bytes = PAGE_SIZE-offset;
3508
3509            maddr = kmap(page);
3510            if (write) {
3511                copy_to_user_page(vma, page, addr,
3512                          maddr + offset, buf, bytes);
3513                set_page_dirty_lock(page);
3514            } else {
3515                copy_from_user_page(vma, page, addr,
3516                            buf, maddr + offset, bytes);
3517            }
3518            kunmap(page);
3519            page_cache_release(page);
3520        }
3521        len -= bytes;
3522        buf += bytes;
3523        addr += bytes;
3524    }
3525    up_read(&mm->mmap_sem);
3526    mmput(mm);
3527
3528    return buf - old_buf;
3529}
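
/*
 * Typical use (sketch): ptrace reads and writes the tracee this way; e.g.
 * a PTRACE_PEEKDATA-style read of one word:
 *
 *	unsigned long word;
 *	int copied = access_process_vm(child, addr, &word, sizeof(word), 0);
 *
 *	if (copied != sizeof(word))
 *		return -EIO;
 *
 * The return value is the number of bytes actually transferred; it can be
 * short if part of the range is not mapped in the target mm.
 */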
3530
3531/*
3532 * Print the name of a VMA.
3533 */
3534void print_vma_addr(char *prefix, unsigned long ip)
3535{
3536    struct mm_struct *mm = current->mm;
3537    struct vm_area_struct *vma;
3538
3539    /*
3540     * Do not print if we are in atomic
3541     * contexts (in exception stacks, etc.):
3542     */
3543    if (preempt_count())
3544        return;
3545
3546    down_read(&mm->mmap_sem);
3547    vma = find_vma(mm, ip);
3548    if (vma && vma->vm_file) {
3549        struct file *f = vma->vm_file;
3550        char *buf = (char *)__get_free_page(GFP_KERNEL);
3551        if (buf) {
3552            char *p, *s;
3553
3554            p = d_path(&f->f_path, buf, PAGE_SIZE);
3555            if (IS_ERR(p))
3556                p = "?";
3557            s = strrchr(p, '/');
3558            if (s)
3559                p = s+1;
3560            printk("%s%s[%lx+%lx]", prefix, p,
3561                    vma->vm_start,
3562                    vma->vm_end - vma->vm_start);
3563            free_page((unsigned long)buf);
3564        }
3565    }
3566    up_read(&current->mm->mmap_sem);
3567}
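
/*
 * Example output (illustrative): with prefix " in ", an ip inside a binary
 * named "cat" whose text is mapped at 0x400000 with size 0x1000 prints
 *
 *	 in cat[400000+1000]
 *
 * which is how the arch "segfault at ..." messages that call this helper
 * identify the faulting mapping.
 */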
3568
3569#ifdef CONFIG_PROVE_LOCKING
3570void might_fault(void)
3571{
3572    /*
3573     * Some code (nfs/sunrpc) uses socket ops on kernel memory while
3574     * holding the mmap_sem.  This is safe because kernel memory doesn't
3575     * get paged out, so we'll never actually fault, and the annotations
3576     * below would only generate false positives.
3577     */
3578    if (segment_eq(get_fs(), KERNEL_DS))
3579        return;
3580
3581    might_sleep();
3582    /*
3583     * It would be nicer to annotate only those paths which are not under
3584     * pagefault_disable(); however, that requires a larger audit and
3585     * providing helpers like get_user_atomic().
3586     */
3587    if (!in_atomic() && current->mm)
3588        might_lock_read(&current->mm->mmap_sem);
3589}
3590EXPORT_SYMBOL(might_fault);
3591#endif
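
/*
 * might_fault() is meant to be called at the start of user-access helpers
 * that may sleep, so lockdep can report mmap_sem inversions even on runs
 * where no fault actually occurs.  Sketch of a typical caller (names are
 * illustrative):
 *
 *	long copy_stuff_to_user(void __user *dst, const void *src, long n)
 *	{
 *		might_fault();
 *		return __copy_to_user(dst, src, n);
 *	}
 */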
3592
