/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006 Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
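	/*
	 * Outer loop: one pass per huge page in the range. The inner
	 * loop then replicates that huge page's status into one vec
	 * byte per PAGE_SIZE slot, up to the next huge page boundary.
	 */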
	while (1) {
		unsigned char present;
		pte_t *ptep;
		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically it needs to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 *
	 * However when tmpfs moves the page from pagecache and into swapcache,
	 * it is still in core, but the find_get_page below won't find it.
	 * No big deal, but make a note of it.
	 */
	page = find_get_page(mapping, pgoff);
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

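	/*
	 * There are no ptes for this range. For a file-backed vma the
	 * pages may nevertheless be resident in the page cache, so ask
	 * mincore_page(); anonymous pages that were never faulted in
	 * cannot be in core.
	 */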
	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}

static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
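			/*
			 * Nonlinear file pte: the file offset is encoded
			 * in the pte itself, so query the page cache.
			 */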
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}

static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
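/*
 * For example (illustrative numbers): if addr is one page below
 * vma->vm_end but the caller asked for 4 pages, end is clamped to
 * vma->vm_end, a single vec byte is filled in, and 1 is returned;
 * the caller then advances and calls us again for the rest.
 */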
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma)) {
		mincore_hugetlb_page_range(vma, addr, end, vec);
		return (end - addr) >> PAGE_SHIFT;
	}

	end = pmd_addr_end(addr, end);

	mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *	      invalid for the address space of this process, or
 *	      specify one or more pages which are not currently
 *	      mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
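		/*
		 * E.g. with 4KB pages, a 64MB request is 16384 vec
		 * bytes, handled here as four passes of 4096 pages.
		 */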
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
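
/*
 * Usage sketch (userspace, illustration only; not part of this file's
 * kernel code): probe the residency of a freshly created anonymous
 * mapping, whose pages are not in core until first touched.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t page = getpagesize();
 *	unsigned char vec[4];
 *	void *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (mincore(buf, 4 * page, vec) == 0) {
 *		// vec[i] & 1 is set iff page i is resident; the
 *		// remaining bits are reserved and should be masked.
 *	}
 */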