/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero.
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;

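/*
 * Locking, as implemented below: xip_sparse_mutex serializes allocation
 * of __xip_sparse_page and the creation of new blocks via
 * get_xip_mem(..., create, ...); xip_sparse_seq lets __xip_unmap() run
 * optimistically and retry under the mutex when it races with a writer.
 */
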
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
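
/*
 * The single zeroed page above is shared: xip_file_fault() maps it
 * read-only into every sparse hole that is faulted without shared-write
 * intent, and __xip_unmap() tears those mappings down again once a hole
 * gains backing store.
 */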

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all. It may be NULL.
 */
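/*
 * For reference, the a_ops hook used throughout this file has the
 * following shape (see struct address_space_operations):
 *
 *	int (*get_xip_mem)(struct address_space *, pgoff_t, int create,
 *			   void **kmem, unsigned long *pfn);
 *
 * On success it returns both a kernel virtual address and a pfn for the
 * backing memory at the given page offset; -ENODATA means the range is
 * a hole (sparse).
 */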
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault() and
 * __xip_file_write().
 *
 * It walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
		unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			/* must invalidate_page _before_ freeing the page */
			mmu_notifier_invalidate_page(mm, address);
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);

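	/*
	 * The walk above ran either optimistically (seqcount sampled,
	 * mutex not held) or, after a detected race, again with
	 * xip_sparse_mutex held so that a concurrent writer cannot
	 * install new ptes behind our back.
	 */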
	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in
 * place.
 */
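/*
 * Three outcomes are possible below: the block already has backing
 * memory (its pfn is mapped directly), a hole is faulted for
 * shared-writable access (backing is allocated via get_xip_mem() with
 * create set, then mapped), or a hole is faulted read-only and the
 * shared sparse page is mapped instead.
 */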
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

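		/*
		 * Take the seqcount write side so that a racing
		 * __xip_unmap() retries under xip_sparse_mutex and
		 * cannot miss the zero-page pte installed below.
		 */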
		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault = xip_file_fault,
	.page_mkwrite = filemap_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

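/*
 * VM_MIXEDMAP, set below, is what allows xip_file_fault() to mix
 * vm_insert_mixed() pfn mappings with the struct-page-backed sparse
 * page in a single vma.
 */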
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block, then unmap the
			 * sparse page from all other vmas */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

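		/*
		 * Non-temporal copy where the architecture provides one:
		 * the destination is the backing store itself, so there
		 * is little to gain from pulling it through the cache.
		 */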
		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	ret = __xip_file_write(filp, buf, count, pos, ppos);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * Truncate a page used for execute in place.
 * Functionality is analogous to block_truncate_page(), but uses
 * get_xip_mem() to get at the data instead of the page cache.
 */
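/*
 * Typical call site (a sketch modelled on ext2's XIP truncate path;
 * details vary by filesystem):
 *
 *	if (mapping_is_xip(inode->i_mapping))
 *		error = xip_truncate_page(inode->i_mapping, newsize);
 */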
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);

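/*
 * How a filesystem typically wires these exports up (a sketch modelled
 * on ext2's CONFIG_EXT2_FS_XIP support; the struct name is illustrative):
 *
 *	const struct file_operations myfs_xip_file_operations = {
 *		.llseek	= generic_file_llseek,
 *		.read	= xip_file_read,
 *		.write	= xip_file_write,
 *		.mmap	= xip_file_mmap,
 *	};
 */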