/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002    Andrew Morton
 *              Initial version
 * 27Jun2002    axboe@suse.de
 *              use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
    struct bio_vec *bv;
    int i;

    bio_for_each_segment_all(bv, bio, i) {
        struct page *page = bv->bv_page;
        page_endio(page, bio_data_dir(bio), err);
    }

    bio_put(bio);
}

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
    bio->bi_end_io = mpage_end_io;
    guard_bio_eod(rw, bio);
    submit_bio(rw, bio);
    return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
        sector_t first_sector, int nr_vecs,
        gfp_t gfp_flags)
{
    struct bio *bio;

    bio = bio_alloc(gfp_flags, nr_vecs);

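    /*
     * If we are called from memory reclaim (PF_MEMALLOC) the allocation
     * may fail; keep halving the number of vecs until a smaller bio can
     * be allocated, rather than failing the I/O outright.
     */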
    if (bio == NULL && (current->flags & PF_MEMALLOC)) {
        while (!bio && (nr_vecs /= 2))
            bio = bio_alloc(gfp_flags, nr_vecs);
    }

    if (bio) {
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = first_sector;
    }
    return bio;
}

/*
 * Support function for mpage_readpages. The fs-supplied get_block might
 * return an up-to-date buffer. This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them. So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
    struct inode *inode = page->mapping->host;
    struct buffer_head *page_bh, *head;
    int block = 0;

    if (!page_has_buffers(page)) {
        /*
         * don't make any buffers if there is only one buffer on
         * the page and the page just needs to be set up to date
         */
        if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
            buffer_uptodate(bh)) {
            SetPageUptodate(page);
            return;
        }
        create_empty_buffers(page, 1 << inode->i_blkbits, 0);
    }
    head = page_buffers(page);
    page_bh = head;
    do {
        if (block == page_block) {
            page_bh->b_state = bh->b_state;
            page_bh->b_bdev = bh->b_bdev;
            page_bh->b_blocknr = bh->b_blocknr;
            break;
        }
        page_bh = page_bh->b_this_page;
        block++;
    } while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible bios: a bio is submitted
 * for I/O as soon as the blocks stop being contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        sector_t *last_block_in_bio, struct buffer_head *map_bh,
        unsigned long *first_logical_block, get_block_t get_block)
{
    struct inode *inode = page->mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
    const unsigned blocksize = 1 << blkbits;
    sector_t block_in_file;
    sector_t last_block;
    sector_t last_block_in_file;
    sector_t blocks[MAX_BUF_PER_PAGE];
    unsigned page_block;
    unsigned first_hole = blocks_per_page;
    struct block_device *bdev = NULL;
    int length;
    int fully_mapped = 1;
    unsigned nblocks;
    unsigned relative_block;

    if (page_has_buffers(page))
        goto confused;

    block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
    last_block = block_in_file + nr_pages * blocks_per_page;
    last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
    if (last_block > last_block_in_file)
        last_block = last_block_in_file;
    page_block = 0;

    /*
     * Map blocks using the result from the previous get_block call first.
     */
    nblocks = map_bh->b_size >> blkbits;
    if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
            block_in_file < (*first_logical_block + nblocks)) {
        unsigned map_offset = block_in_file - *first_logical_block;
        unsigned last = nblocks - map_offset;

        for (relative_block = 0; ; relative_block++) {
            if (relative_block == last) {
                clear_buffer_mapped(map_bh);
                break;
            }
            if (page_block == blocks_per_page)
                break;
            blocks[page_block] = map_bh->b_blocknr + map_offset +
                        relative_block;
            page_block++;
            block_in_file++;
        }
        bdev = map_bh->b_bdev;
    }

    /*
     * Then do more get_block calls until we are done with this page.
     */
    map_bh->b_page = page;
    while (page_block < blocks_per_page) {
        map_bh->b_state = 0;
        map_bh->b_size = 0;

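        /*
         * b_size tells get_block() how much mapping we would like;
         * on return it says how much the filesystem actually mapped,
         * which may cover many blocks in a single call.
         */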
        if (block_in_file < last_block) {
            map_bh->b_size = (last_block-block_in_file) << blkbits;
            if (get_block(inode, block_in_file, map_bh, 0))
                goto confused;
            *first_logical_block = block_in_file;
        }

        if (!buffer_mapped(map_bh)) {
            fully_mapped = 0;
            if (first_hole == blocks_per_page)
                first_hole = page_block;
            page_block++;
            block_in_file++;
            continue;
        }

        /* some filesystems will copy data into the page during
         * the get_block call, in which case we don't want to
         * read it again. map_buffer_to_page copies the data
         * we just collected from get_block into the page's buffers
         * so readpage doesn't have to repeat the get_block call
         */
        if (buffer_uptodate(map_bh)) {
            map_buffer_to_page(page, map_bh, page_block);
            goto confused;
        }

        if (first_hole != blocks_per_page)
            goto confused; /* hole -> non-hole */

        /* Contiguous blocks? */
        if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
            goto confused;
        nblocks = map_bh->b_size >> blkbits;
        for (relative_block = 0; ; relative_block++) {
            if (relative_block == nblocks) {
                clear_buffer_mapped(map_bh);
                break;
            } else if (page_block == blocks_per_page)
                break;
            blocks[page_block] = map_bh->b_blocknr+relative_block;
            page_block++;
            block_in_file++;
        }
        bdev = map_bh->b_bdev;
    }

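    /*
     * The page ends in a hole (holes can only be at the end, since a
     * non-hole after a hole goes to the confused path above): zero out
     * the part no read will fill. An all-hole page is already complete
     * and needs no I/O at all.
     */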
    if (first_hole != blocks_per_page) {
        zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
        if (first_hole == 0) {
            SetPageUptodate(page);
            unlock_page(page);
            goto out;
        }
    } else if (fully_mapped) {
        SetPageMappedToDisk(page);
    }

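    /*
     * A fully-mapped single-block page may be satisfiable from
     * cleancache; on a hit the data is already in the page and the
     * block I/O below can be skipped.
     */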
    if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
        cleancache_get_page(page) == 0) {
        SetPageUptodate(page);
        goto confused;
    }

    /*
     * This page will go to BIO. Do we need to send this BIO off first?
     */
    if (bio && (*last_block_in_bio != blocks[0] - 1))
        bio = mpage_bio_submit(READ, bio);

alloc_new:
    if (bio == NULL) {
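        /*
         * For a fully-mapped page, first try the block device's
         * ->rw_page fast path via bdev_read_page(), which avoids
         * allocating a bio altogether.
         */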
        if (first_hole == blocks_per_page) {
            if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
                                page))
                goto out;
        }
        bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
                GFP_KERNEL);
        if (bio == NULL)
            goto confused;
    }

    length = first_hole << blkbits;
    if (bio_add_page(bio, page, length, 0) < length) {
        bio = mpage_bio_submit(READ, bio);
        goto alloc_new;
    }

    relative_block = block_in_file - *first_logical_block;
    nblocks = map_bh->b_size >> blkbits;
    if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
        (first_hole != blocks_per_page))
        bio = mpage_bio_submit(READ, bio);
    else
        *last_block_in_bio = blocks[blocks_per_page - 1];
out:
    return bio;

confused:
    if (bio)
        bio = mpage_bio_submit(READ, bio);
    if (!PageUptodate(page))
        block_read_full_page(page, get_block);
    else
        unlock_page(page);
    goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages. These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem. The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all. That's fine, but obtaining
 * the disk mappings may require I/O. Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *    12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16. Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11. BH_Boundary says: the mapping of the
 * block after this one will require I/O against a block which is probably
 * close to this one. So you should push whatever I/O you have currently
 * accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
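 *
 * A filesystem typically uses this as its ->readpages implementation.
 * A minimal sketch for a hypothetical filesystem "foo", assuming a
 * get_block implementation named foo_get_block:
 *
 *    static int foo_readpages(struct file *file,
 *            struct address_space *mapping,
 *            struct list_head *pages, unsigned nr_pages)
 *    {
 *        return mpage_readpages(mapping, pages, nr_pages, foo_get_block);
 *    }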
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
                unsigned nr_pages, get_block_t get_block)
{
    struct bio *bio = NULL;
    unsigned page_idx;
    sector_t last_block_in_bio = 0;
    struct buffer_head map_bh;
    unsigned long first_logical_block = 0;

    map_bh.b_state = 0;
    map_bh.b_size = 0;
    for (page_idx = 0; page_idx < nr_pages; page_idx++) {
        struct page *page = list_entry(pages->prev, struct page, lru);

        prefetchw(&page->flags);
        list_del(&page->lru);
        if (!add_to_page_cache_lru(page, mapping,
                    page->index, GFP_KERNEL)) {
            bio = do_mpage_readpage(bio, page,
                    nr_pages - page_idx,
                    &last_block_in_bio, &map_bh,
                    &first_logical_block,
                    get_block);
        }
        page_cache_release(page);
    }
    BUG_ON(!list_empty(pages));
    if (bio)
        mpage_bio_submit(READ, bio);
    return 0;
}
EXPORT_SYMBOL(mpage_readpages);

/*
 * This isn't called much at all
 */
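/*
 * Sketch of a typical ->readpage hookup for a hypothetical filesystem
 * "foo" (assuming foo_get_block as above):
 *
 *    static int foo_readpage(struct file *file, struct page *page)
 *    {
 *        return mpage_readpage(page, foo_get_block);
 *    }
 */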
int mpage_readpage(struct page *page, get_block_t get_block)
{
    struct bio *bio = NULL;
    sector_t last_block_in_bio = 0;
    struct buffer_head map_bh;
    unsigned long first_logical_block = 0;

    map_bh.b_state = 0;
    map_bh.b_size = 0;
    bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
            &map_bh, &first_logical_block, get_block);
    if (bio)
        mpage_bio_submit(READ, bio);
    return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping. We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO. Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO. For now,
 * just allocate full-size (16-page) BIOs.
 */

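/*
 * Per-call state carried between __mpage_writepage() invocations by
 * write_cache_pages(), so that physically contiguous pages can share a
 * single open bio.
 */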
struct mpage_data {
    struct bio *bio;
    sector_t last_block_in_bio;
    get_block_t *get_block;
    unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean. Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
    unsigned buffer_counter = 0;
    struct buffer_head *bh, *head;
    if (!page_has_buffers(page))
        return;
    head = page_buffers(page);
    bh = head;

    do {
        if (buffer_counter++ == first_unmapped)
            break;
        clear_buffer_dirty(bh);
        bh = bh->b_this_page;
    } while (bh != head);

    /*
     * We cannot drop the bh if the page is not uptodate: a concurrent
     * readpage would then fail to serialize against the bh and would
     * read from disk before our data reaches the platter.
     */
    if (buffer_heads_over_limit && PageUptodate(page))
        try_to_free_buffers(page);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
              void *data)
{
    struct mpage_data *mpd = data;
    struct bio *bio = mpd->bio;
    struct address_space *mapping = page->mapping;
    struct inode *inode = page->mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    unsigned long end_index;
    const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
    sector_t last_block;
    sector_t block_in_file;
    sector_t blocks[MAX_BUF_PER_PAGE];
    unsigned page_block;
    unsigned first_unmapped = blocks_per_page;
    struct block_device *bdev = NULL;
    int boundary = 0;
    sector_t boundary_block = 0;
    struct block_device *boundary_bdev = NULL;
    int length;
    struct buffer_head map_bh;
    loff_t i_size = i_size_read(inode);
    int ret = 0;

    if (page_has_buffers(page)) {
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;

        /* If they're all mapped and dirty, do it */
        page_block = 0;
        do {
            BUG_ON(buffer_locked(bh));
            if (!buffer_mapped(bh)) {
                /*
                 * unmapped dirty buffers are created by
                 * __set_page_dirty_buffers -> mmapped data
                 */
                if (buffer_dirty(bh))
                    goto confused;
                if (first_unmapped == blocks_per_page)
                    first_unmapped = page_block;
                continue;
            }

            if (first_unmapped != blocks_per_page)
                goto confused; /* hole -> non-hole */

            if (!buffer_dirty(bh) || !buffer_uptodate(bh))
                goto confused;
            if (page_block) {
                if (bh->b_blocknr != blocks[page_block-1] + 1)
                    goto confused;
            }
            blocks[page_block++] = bh->b_blocknr;
            boundary = buffer_boundary(bh);
            if (boundary) {
                boundary_block = bh->b_blocknr;
                boundary_bdev = bh->b_bdev;
            }
            bdev = bh->b_bdev;
        } while ((bh = bh->b_this_page) != head);

        if (first_unmapped)
            goto page_is_mapped;

        /*
         * Page has buffers, but they are all unmapped. The page was
         * created by pagein or read over a hole which was handled by
         * block_read_full_page(). If this address_space is also
         * using mpage_readpages then this will only rarely happen.
         */
        goto confused;
    }

    /*
     * The page has no buffers: map it to disk
     */
    BUG_ON(!PageUptodate(page));
    block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
    last_block = (i_size - 1) >> blkbits;
    map_bh.b_page = page;
    for (page_block = 0; page_block < blocks_per_page; ) {

        map_bh.b_state = 0;
        map_bh.b_size = 1 << blkbits;
        if (mpd->get_block(inode, block_in_file, &map_bh, 1))
            goto confused;
        if (buffer_new(&map_bh))
            unmap_underlying_metadata(map_bh.b_bdev,
                        map_bh.b_blocknr);
        if (buffer_boundary(&map_bh)) {
            boundary_block = map_bh.b_blocknr;
            boundary_bdev = map_bh.b_bdev;
        }
        if (page_block) {
            if (map_bh.b_blocknr != blocks[page_block-1] + 1)
                goto confused;
        }
        blocks[page_block++] = map_bh.b_blocknr;
        boundary = buffer_boundary(&map_bh);
        bdev = map_bh.b_bdev;
        if (block_in_file == last_block)
            break;
        block_in_file++;
    }
    BUG_ON(page_block == 0);

    first_unmapped = page_block;

page_is_mapped:
    end_index = i_size >> PAGE_CACHE_SHIFT;
    if (page->index >= end_index) {
        /*
         * The page straddles i_size. It must be zeroed out on each
         * and every writepage invocation because it may be mmapped.
         * "A file is mapped in multiples of the page size. For a file
         * that is not a multiple of the page size, the remaining memory
         * is zeroed when mapped, and writes to that region are not
         * written out to the file."
         */
        unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

        if (page->index > end_index || !offset)
            goto confused;
        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
    }

    /*
     * This page will go to BIO. Do we need to send this BIO off first?
     */
    if (bio && mpd->last_block_in_bio != blocks[0] - 1)
        bio = mpage_bio_submit(WRITE, bio);

alloc_new:
    if (bio == NULL) {
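        /*
         * As on the read side, a fully-mapped page can first try the
         * block device's ->rw_page fast path via bdev_write_page(),
         * avoiding bio allocation entirely.
         */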
        if (first_unmapped == blocks_per_page) {
            if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
                                page, wbc)) {
                clean_buffers(page, first_unmapped);
                goto out;
            }
        }
        bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
        if (bio == NULL)
            goto confused;
    }

    /*
     * Must try to add the page before marking the buffer clean or
     * the confused fail path above (OOM) will be very confused when
     * it finds all bh marked clean (i.e. it will not write anything)
     */
    length = first_unmapped << blkbits;
    if (bio_add_page(bio, page, length, 0) < length) {
        bio = mpage_bio_submit(WRITE, bio);
        goto alloc_new;
    }

    clean_buffers(page, first_unmapped);

    BUG_ON(PageWriteback(page));
    set_page_writeback(page);
    unlock_page(page);
    if (boundary || (first_unmapped != blocks_per_page)) {
        bio = mpage_bio_submit(WRITE, bio);
        if (boundary_block) {
            write_boundary_block(boundary_bdev,
                    boundary_block, 1 << blkbits);
        }
    } else {
        mpd->last_block_in_bio = blocks[blocks_per_page - 1];
    }
    goto out;

confused:
    if (bio)
        bio = mpage_bio_submit(WRITE, bio);

    if (mpd->use_writepage) {
        ret = mapping->a_ops->writepage(page, wbc);
    } else {
        ret = -EAGAIN;
        goto out;
    }
    /*
     * The caller has a ref on the inode, so *mapping is stable
     */
    mapping_set_error(mapping, ret);
out:
    mpd->bio = bio;
    return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage. Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync(): fsync()
 * and msync() must guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it. If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
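 *
 * A minimal ->writepages hookup for a hypothetical filesystem "foo"
 * (assuming foo_get_block as above):
 *
 *    static int foo_writepages(struct address_space *mapping,
 *            struct writeback_control *wbc)
 *    {
 *        return mpage_writepages(mapping, wbc, foo_get_block);
 *    }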
 */
int
mpage_writepages(struct address_space *mapping,
        struct writeback_control *wbc, get_block_t get_block)
{
    struct blk_plug plug;
    int ret;

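    /*
     * Plug the block queue so that the bios built below are held back
     * and submitted in one batch when we unplug.
     */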
    blk_start_plug(&plug);

    if (!get_block)
        ret = generic_writepages(mapping, wbc);
    else {
        struct mpage_data mpd = {
            .bio = NULL,
            .last_block_in_bio = 0,
            .get_block = get_block,
            .use_writepage = 1,
        };

        ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
        if (mpd.bio)
            mpage_bio_submit(WRITE, mpd.bio);
    }
    blk_finish_plug(&plug);
    return ret;
}
EXPORT_SYMBOL(mpage_writepages);

int mpage_writepage(struct page *page, get_block_t get_block,
    struct writeback_control *wbc)
{
    struct mpage_data mpd = {
        .bio = NULL,
        .last_block_in_bio = 0,
        .get_block = get_block,
        .use_writepage = 0,
    };
    int ret = __mpage_writepage(page, wbc, &mpd);
    if (mpd.bio)
        mpage_bio_submit(WRITE, mpd.bio);
    return ret;
}
EXPORT_SYMBOL(mpage_writepage);