/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
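
/*
 * A filesystem opts in by pointing ->invalidatepage at its own handler in
 * its address_space_operations; block-based filesystems can simply rely on
 * the block_invalidatepage fallback above, or wire it up explicitly. A
 * minimal sketch (foo_aops, foo_readpage and foo_writepage are hypothetical
 * names):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */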

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
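
/*
 * The fs/buffer.c case mentioned above is a useful illustration: once
 * try_to_free_buffers() has stripped every buffer from a page, the page's
 * dirty bit is stale, so it is cancelled with roughly:
 *
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * which keeps the now-bufferless page from being written back.
 */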

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
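
/*
 * Memory-failure handling reaches this through the ->error_remove_page
 * method of address_space_operations, so a filesystem enables it with a
 * one-line hookup (foo_aops is a hypothetical name):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */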

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;
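
	/*
	 * Worked example, assuming PAGE_CACHE_SIZE == 4096: for lstart = 1000
	 * and lend = 10239 we get partial_start = 1000, partial_end =
	 * (10240 & 4095) = 2048, start = 1 and end = 2. Page 1 is removed
	 * whole; bytes 1000-4095 of page 0 and bytes 0-2047 of page 2 are
	 * only zeroed, as partial pages.
	 */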

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index >= end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index >= end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index >= end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range. Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
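
/*
 * The canonical caller is a filesystem's ->evict_inode(), which must empty
 * the pagecache before the inode goes away; a sketch (foo_evict_inode is a
 * hypothetical example):
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		clear_inode(inode);
 *	}
 */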

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
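
/*
 * A representative caller is the drop_caches sysctl, which walks every inode
 * and invalidates the full range, along the lines of:
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * Because dirty, locked, mapped and writeback pages are simply skipped, this
 * is safe to call opportunistically on any mapping.
 */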

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount. We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
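
/*
 * The classic user is the direct-I/O write path, which must shoot down any
 * pagecache covering the range it is about to write around the cache;
 * roughly what mm/filemap.c does:
 *
 *	written = invalidate_inode_pages2_range(mapping,
 *					pos >> PAGE_CACHE_SHIFT, end);
 *
 * where an -EBUSY result makes generic_file_direct_write() fall back to a
 * buffered write.
 */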

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps. However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
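
/*
 * Typical use is from a filesystem's ->setattr(), mirroring what
 * simple_setattr() does for in-memory filesystems:
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		truncate_setsize(inode, attr->ia_size);
 *
 * with the filesystem's own on-disk block truncation performed afterwards.
 */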

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards. However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first. Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
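
/*
 * Hole punching is the typical use: a filesystem's fallocate handler for
 * FALLOC_FL_PUNCH_HOLE drops the affected pagecache before freeing the
 * underlying blocks, along the lines of (foo_free_blocks is hypothetical):
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	foo_free_blocks(inode, offset, len);
 */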