mm/mprotect.c

/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

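/*
 * Architectures that keep extra state in the protection bits can supply
 * their own pgprot_modify() to preserve it; this generic fallback simply
 * adopts the new protection unchanged.
 */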
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
    return newprot;
}
#endif

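/*
 * Walk the PTEs in [addr, end) under the page-table lock and rewrite their
 * protection.  Present PTEs go through the ptep_modify_prot_start()/commit()
 * protocol; write-migration swap entries are downgraded to read-only
 * migration entries.
 */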
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
        unsigned long addr, unsigned long end, pgprot_t newprot,
        int dirty_accountable)
{
    pte_t *pte, oldpte;
    spinlock_t *ptl;

    pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    arch_enter_lazy_mmu_mode();
    do {
        oldpte = *pte;
        if (pte_present(oldpte)) {
            pte_t ptent;

            ptent = ptep_modify_prot_start(mm, addr, pte);
            ptent = pte_modify(ptent, newprot);

            /*
             * Avoid taking write faults for pages we know to be
             * dirty.
             */
            if (dirty_accountable && pte_dirty(ptent))
                ptent = pte_mkwrite(ptent);

            ptep_modify_prot_commit(mm, addr, pte, ptent);
        } else if (PAGE_MIGRATION && !pte_file(oldpte)) {
            swp_entry_t entry = pte_to_swp_entry(oldpte);

            if (is_write_migration_entry(entry)) {
                /*
                 * A protection check is difficult so
                 * just be safe and disable write
                 */
                make_migration_entry_read(&entry);
                set_pte_at(mm, addr, pte,
                    swp_entry_to_pte(entry));
            }
        }
    } while (pte++, addr += PAGE_SIZE, addr != end);
    arch_leave_lazy_mmu_mode();
    pte_unmap_unlock(pte - 1, ptl);
}

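/* Walk the PMDs covering [addr, end) and update each populated PTE range. */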
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
        unsigned long addr, unsigned long end, pgprot_t newprot,
        int dirty_accountable)
{
    pmd_t *pmd;
    unsigned long next;

    pmd = pmd_offset(pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(pmd))
            continue;
        change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
    } while (pmd++, addr = next, addr != end);
}

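/* Walk the PUDs covering [addr, end) and descend into each populated PMD. */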
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
        unsigned long addr, unsigned long end, pgprot_t newprot,
        int dirty_accountable)
{
    pud_t *pud;
    unsigned long next;

    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud))
            continue;
        change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
    } while (pud++, addr = next, addr != end);
}

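/*
 * Top level of the page-table walk: flush caches for the range, rewrite the
 * protections level by level (pgd -> pud -> pmd -> pte), then flush the TLB
 * for the whole range once at the end.
 */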
static void change_protection(struct vm_area_struct *vma,
        unsigned long addr, unsigned long end, pgprot_t newprot,
        int dirty_accountable)
{
    struct mm_struct *mm = vma->vm_mm;
    pgd_t *pgd;
    unsigned long next;
    unsigned long start = addr;

    BUG_ON(addr >= end);
    pgd = pgd_offset(mm, addr);
    flush_cache_range(vma, addr, end);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd))
            continue;
        change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
    } while (pgd++, addr = next, addr != end);
    flush_tlb_range(vma, start, end);
}

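/*
 * Apply newflags to the [start, end) slice of a single vma: charge any newly
 * writable private memory against the commit limit, try to merge with
 * neighbouring vmas (or split this one) so the slice has its own vma, then
 * rewrite the page-table protections for the range.
 */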
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
    unsigned long start, unsigned long end, unsigned long newflags)
{
    struct mm_struct *mm = vma->vm_mm;
    unsigned long oldflags = vma->vm_flags;
    long nrpages = (end - start) >> PAGE_SHIFT;
    unsigned long charged = 0;
    pgoff_t pgoff;
    int error;
    int dirty_accountable = 0;

    if (newflags == oldflags) {
        *pprev = vma;
        return 0;
    }

    /*
     * If we make a private mapping writable we increase our commit;
     * but (without finer accounting) cannot reduce our commit if we
     * make it unwritable again. hugetlb mappings were accounted for
     * even if read-only, so there is no need to account for them here.
     */
    if (newflags & VM_WRITE) {
        if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                        VM_SHARED|VM_NORESERVE))) {
            charged = nrpages;
            if (security_vm_enough_memory(charged))
                return -ENOMEM;
            newflags |= VM_ACCOUNT;
        }
    }

    /*
     * First try to merge with previous and/or next vma.
     */
    pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
    *pprev = vma_merge(mm, *pprev, start, end, newflags,
            vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
    if (*pprev) {
        vma = *pprev;
        goto success;
    }

    *pprev = vma;

    if (start != vma->vm_start) {
        error = split_vma(mm, vma, start, 1);
        if (error)
            goto fail;
    }

    if (end != vma->vm_end) {
        error = split_vma(mm, vma, end, 0);
        if (error)
            goto fail;
    }

success:
    /*
     * vm_flags and vm_page_prot are protected by the mmap_sem
     * held in write mode.
     */
    vma->vm_flags = newflags;
    vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                      vm_get_page_prot(newflags));

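    /*
     * Shared writable mappings that need to be notified of the first write
     * to each page (dirty accounting, page_mkwrite) keep their ptes
     * read-only here; dirty_accountable lets already-dirty ptes be made
     * writable immediately so they do not take a pointless write fault.
     */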
    if (vma_wants_writenotify(vma)) {
        vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
        dirty_accountable = 1;
    }

    mmu_notifier_invalidate_range_start(mm, start, end);
    if (is_vm_hugetlb_page(vma))
        hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
    else
        change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
    mmu_notifier_invalidate_range_end(mm, start, end);
    vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
    vm_stat_account(mm, newflags, vma->vm_file, nrpages);
    return 0;

fail:
    vm_unacct_memory(charged);
    return error;
}

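/*
 * sys_mprotect(): validate the arguments, then walk every vma that overlaps
 * [start, start+len) and apply the new protection to each piece via
 * mprotect_fixup().  Any hole in the range yields -ENOMEM.
 */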
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
        unsigned long, prot)
{
    unsigned long vm_flags, nstart, end, tmp, reqprot;
    struct vm_area_struct *vma, *prev;
    int error = -EINVAL;
    const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
    prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
    if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
        return -EINVAL;

    if (start & ~PAGE_MASK)
        return -EINVAL;
    if (!len)
        return 0;
    len = PAGE_ALIGN(len);
    end = start + len;
    if (end <= start)
        return -ENOMEM;
    if (!arch_validate_prot(prot))
        return -EINVAL;

    reqprot = prot;
    /*
     * Does the application expect PROT_READ to imply PROT_EXEC:
     */
    if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
        prot |= PROT_EXEC;

    vm_flags = calc_vm_prot_bits(prot);

    down_write(&current->mm->mmap_sem);

    vma = find_vma_prev(current->mm, start, &prev);
    error = -ENOMEM;
    if (!vma)
        goto out;
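    /*
     * PROT_GROWSDOWN extends the change down to the current start of a
     * growsdown (stack) vma; PROT_GROWSUP extends it up to the end of a
     * growsup vma.  Either flag on a vma of the wrong kind is -EINVAL.
     */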
    if (unlikely(grows & PROT_GROWSDOWN)) {
        if (vma->vm_start >= end)
            goto out;
        start = vma->vm_start;
        error = -EINVAL;
        if (!(vma->vm_flags & VM_GROWSDOWN))
            goto out;
    } else {
        if (vma->vm_start > start)
            goto out;
        if (unlikely(grows & PROT_GROWSUP)) {
            end = vma->vm_end;
            error = -EINVAL;
            if (!(vma->vm_flags & VM_GROWSUP))
                goto out;
        }
    }
    if (start > vma->vm_start)
        prev = vma;

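    /*
     * Walk the vmas covering [start, end).  Each iteration handles the
     * overlap of the current vma with the remaining range; a gap before the
     * next vma means part of the range is unmapped, so fail with -ENOMEM.
     */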
    for (nstart = start ; ; ) {
        unsigned long newflags;

        /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

        newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

        /* newflags >> 4 shifts the VM_MAY% bits into the VM_% positions */
        if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
            error = -EACCES;
            goto out;
        }

        error = security_file_mprotect(vma, reqprot, prot);
        if (error)
            goto out;

        tmp = vma->vm_end;
        if (tmp > end)
            tmp = end;
        error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
        if (error)
            goto out;
        perf_event_mmap(vma);
        nstart = tmp;

        if (nstart < prev->vm_end)
            nstart = prev->vm_end;
        if (nstart >= end)
            goto out;

        vma = prev->vm_next;
        if (!vma || vma->vm_start != nstart) {
            error = -ENOMEM;
            goto out;
        }
    }
out:
    up_write(&current->mm->mmap_sem);
    return error;
}
