/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

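	/*
	 * Walk the completed pages from the last bio_vec back to the first,
	 * prefetching the next page's flags while the current one is being
	 * finished off.
	 */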
	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

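/*
 * Set the completion handler and hand the assembled BIO to the block layer.
 * The BIO is consumed by submission, so NULL is returned for the caller to
 * store back into its bio pointer.
 */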
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	submit_bio(rw, bio);
	return NULL;
}

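/*
 * Allocate a BIO with room for up to nr_vecs pages.  When allocation fails
 * on the memory-reclaim path (PF_MEMALLOC), retry with progressively smaller
 * sizes instead of giving up immediately.
 */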
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs; it submits them for IO
 * whenever the blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

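	/*
	 * A fully mapped, single-block page that is not yet uptodate may be
	 * satisfied from cleancache; on a hit the page is already populated
	 * and no read BIO needs to be built for it.
	 */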
	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

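	/*
	 * Allocate a BIO sized for the remaining pages, capped at what the
	 * device allows.  If this page later fails to fit, the partial BIO
	 * is submitted and we come back here for a fresh one.
	 */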
alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

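	/*
	 * Fallback path: the page could not be handled as part of a
	 * multipage BIO (or is already uptodate), so submit any pending BIO
	 * and either hand the page to the buffer_head-based read routine or
	 * simply unlock it.
	 */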
confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	struct blk_plug plug;

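	/*
	 * Plug the queue so the per-page read BIOs submitted below are
	 * batched and dispatched together when the plug is released.
	 */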
	blk_start_plug(&plug);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	blk_finish_plug(&plug);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

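/*
 * State shared across __mpage_writepage() calls: the BIO being assembled,
 * the last block added to it, the filesystem's block mapper, and whether
 * to fall back to ->writepage() when a page cannot go into a BIO.
 */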
struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * we cannot drop the bh if the page is not uptodate
		 * or a concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
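	/*
	 * Submit now if the page ends in unmapped blocks or the mapping hit
	 * a BH_Boundary block; otherwise remember the last block so the next
	 * page can be appended to this BIO.
	 */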
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *   If this is NULL then use a_ops->writepage.  Otherwise, go
 *   direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio)
			mpage_bio_submit(WRITE, mpd.bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);