/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

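/*
 * Rewrite the protection bits of every pte in one pte page, under the
 * page-table lock. With dirty_accountable set, ptes already known dirty
 * are made writable up front so they never take a spurious write fault.
 * Write migration entries are downgraded to read, since a protection
 * check cannot be done on a page that is being migrated.
 */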
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

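/*
 * Walk the pmds covering [addr, end) below one pud. A transparent huge
 * pmd is changed in place by change_huge_pmd() when the range covers the
 * whole huge page, and split and handled pte by pte otherwise.
 */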
static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma->vm_mm, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot))
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
				 dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}

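/*
 * Walk the puds covering [addr, end) below one pgd, descending into
 * change_pmd_range() for each populated entry.
 */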
static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable);
	} while (pud++, addr = next, addr != end);
}

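/*
 * Top level of the page-table walk: flush the caches for the range,
 * rewrite the protections pgd by pgd, then flush the TLB so no stale
 * translation with the old permissions survives.
 */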
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

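/*
 * Apply newflags to the range [start, end), which must lie within a
 * single vma: charge the commit when a private mapping becomes writable,
 * merge with adjacent vmas or split this one so the range matches a vma
 * exactly, then update vm_flags and vm_page_prot and rewrite the page
 * tables. On success *pprev points at the vma covering the range.
 */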
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot,
				  dirty_accountable);
	mmu_notifier_invalidate_range_end(mm, start, end);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

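/*
 * The mprotect(2) entry point: validate and page-align the arguments,
 * honour READ_IMPLIES_EXEC personalities, then walk every vma covering
 * [start, start+len) with mmap_sem held for writing and fix each one up.
 * A hole in the range yields -ENOMEM; asking for a permission whose
 * VM_MAY* counterpart is missing yields -EACCES.
 */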
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY* into the place of VM_* */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
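/*
 * Illustrative userspace sketch (not part of this file): the syscall
 * above is normally reached through the C library wrapper declared in
 * <sys/mman.h>. Assuming a page-aligned anonymous mapping:
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	buf[0] = 1;
 *	if (mprotect(buf, 4096, PROT_READ) < 0)
 *		perror("mprotect");
 *	buf[0] = 2;	<- this store would now fault with SIGSEGV
 */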