fs/btrfs/disk-io.c

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);

static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
    struct bio *bio;
    bio_end_io_t *end_io;
    void *private;
    struct btrfs_fs_info *info;
    int error;
    int metadata;
    struct list_head list;
    struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads. They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
    struct inode *inode;
    struct bio *bio;
    struct list_head list;
    extent_submit_bio_hook_t *submit_bio_start;
    extent_submit_bio_hook_t *submit_bio_done;
    int rw;
    int mirror_num;
    unsigned long bio_flags;
    struct btrfs_work work;
};
/* These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this
 * code needs to be updated as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "the btrfs_eb_class/btrfs_eb_name tables below assume 8 levels"
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
    /* leaf */
    "btrfs-extent-00",
    "btrfs-extent-01",
    "btrfs-extent-02",
    "btrfs-extent-03",
    "btrfs-extent-04",
    "btrfs-extent-05",
    "btrfs-extent-06",
    "btrfs-extent-07",
    /* highest possible level */
    "btrfs-extent-08",
};
#endif

/*
 * extents on the btree inode are pretty simple: there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
        struct page *page, size_t page_offset, u64 start, u64 len,
        int create)
{
    struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
    struct extent_map *em;
    int ret;

    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, start, len);
    if (em) {
        em->bdev =
            BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
        read_unlock(&em_tree->lock);
        goto out;
    }
    read_unlock(&em_tree->lock);

    em = alloc_extent_map(GFP_NOFS);
    if (!em) {
        em = ERR_PTR(-ENOMEM);
        goto out;
    }
    em->start = 0;
    em->len = (u64)-1;
    em->block_len = (u64)-1;
    em->block_start = 0;
    em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

    write_lock(&em_tree->lock);
    ret = add_extent_mapping(em_tree, em);
    if (ret == -EEXIST) {
        u64 failed_start = em->start;
        u64 failed_len = em->len;

        free_extent_map(em);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
            ret = 0;
        } else {
            em = lookup_extent_mapping(em_tree, failed_start,
                           failed_len);
            ret = -EIO;
        }
    } else if (ret) {
        free_extent_map(em);
        em = NULL;
    }
    write_unlock(&em_tree->lock);

    if (ret)
        em = ERR_PTR(ret);
out:
    return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
    return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
    *(__le32 *)result = ~cpu_to_le32(crc);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
               int verify)
{
    u16 csum_size =
        btrfs_super_csum_size(&root->fs_info->super_copy);
    char *result = NULL;
    unsigned long len;
    unsigned long cur_len;
    unsigned long offset = BTRFS_CSUM_SIZE;
    char *map_token = NULL;
    char *kaddr;
    unsigned long map_start;
    unsigned long map_len;
    int err;
    u32 crc = ~(u32)0;
    unsigned long inline_result;

    len = buf->len - offset;
    while (len > 0) {
        err = map_private_extent_buffer(buf, offset, 32,
                    &map_token, &kaddr,
                    &map_start, &map_len, KM_USER0);
        if (err)
            return 1;
        cur_len = min(len, map_len - (offset - map_start));
        crc = btrfs_csum_data(root, kaddr + offset - map_start,
                      crc, cur_len);
        len -= cur_len;
        offset += cur_len;
        unmap_extent_buffer(buf, map_token, KM_USER0);
    }
    if (csum_size > sizeof(inline_result)) {
        result = kzalloc(csum_size, GFP_NOFS);
        if (!result)
            return 1;
    } else {
        result = (char *)&inline_result;
    }

    btrfs_csum_final(crc, result);

    if (verify) {
        if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
            u32 val;
            u32 found = 0;
            memcpy(&found, result, csum_size);

            read_extent_buffer(buf, &val, 0, csum_size);
            if (printk_ratelimit()) {
                printk(KERN_INFO "btrfs: %s checksum verify "
                       "failed on %llu wanted %X found %X "
                       "level %d\n",
                       root->fs_info->sb->s_id,
                       (unsigned long long)buf->start, val, found,
                       btrfs_header_level(buf));
            }
            if (result != (char *)&inline_result)
                kfree(result);
            return 1;
        }
    } else {
        write_extent_buffer(buf, result, 0, csum_size);
    }
    if (result != (char *)&inline_result)
        kfree(result);
    return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer. This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                 struct extent_buffer *eb, u64 parent_transid)
{
    struct extent_state *cached_state = NULL;
    int ret;

    if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
        return 0;

    lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
             0, &cached_state, GFP_NOFS);
    if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
        btrfs_header_generation(eb) == parent_transid) {
        ret = 0;
        goto out;
    }
    if (printk_ratelimit()) {
        printk(KERN_INFO "parent transid verify failed on %llu "
               "wanted %llu found %llu\n",
               (unsigned long long)eb->start,
               (unsigned long long)parent_transid,
               (unsigned long long)btrfs_header_generation(eb));
    }
    ret = 1;
    clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
    unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                 &cached_state, GFP_NOFS);
    return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                      struct extent_buffer *eb,
                      u64 start, u64 parent_transid)
{
    struct extent_io_tree *io_tree;
    int ret;
    int num_copies = 0;
    int mirror_num = 0;

    io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
    while (1) {
        ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                           btree_get_extent, mirror_num);
        if (!ret &&
            !verify_parent_transid(io_tree, eb, parent_transid))
            return ret;

        num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                          eb->start, eb->len);
        if (num_copies == 1)
            return ret;

        mirror_num++;
        if (mirror_num > num_copies)
            return ret;
    }
    return -EIO;
}

/*
 * checksum a dirty tree block before IO. This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
    struct extent_io_tree *tree;
    u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
    u64 found_start;
    int found_level;
    unsigned long len;
    struct extent_buffer *eb;
    int ret;

    tree = &BTRFS_I(page->mapping->host)->io_tree;

    if (page->private == EXTENT_PAGE_PRIVATE)
        goto out;
    if (!page->private)
        goto out;
    len = page->private >> 2;
    WARN_ON(len == 0);

    eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
    ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                         btrfs_header_generation(eb));
    BUG_ON(ret);
    found_start = btrfs_header_bytenr(eb);
    if (found_start != start) {
        WARN_ON(1);
        goto err;
    }
    if (eb->first_page != page) {
        WARN_ON(1);
        goto err;
    }
    if (!PageUptodate(page)) {
        WARN_ON(1);
        goto err;
    }
    found_level = btrfs_header_level(eb);

    csum_tree_block(root, eb, 0);
err:
    free_extent_buffer(eb);
out:
    return 0;
}

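/*
 * return 0 if the fsid in the given tree block header matches this
 * filesystem (walking the seed device lists too), nonzero otherwise.
 */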
static int check_tree_block_fsid(struct btrfs_root *root,
                 struct extent_buffer *eb)
{
    struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
    u8 fsid[BTRFS_UUID_SIZE];
    int ret = 1;

    read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
               BTRFS_FSID_SIZE);
    while (fs_devices) {
        if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
            ret = 0;
            break;
        }
        fs_devices = fs_devices->seed;
    }
    return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
    lockdep_set_class_and_name(&eb->lock,
               &btrfs_eb_class[level],
               btrfs_eb_name[level]);
}
#endif

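/*
 * read end_io hook for tree blocks: verify that the header bytenr,
 * first page, fsid and checksum all match before the buffer is
 * allowed to be considered up to date.
 */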
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                   struct extent_state *state)
{
    struct extent_io_tree *tree;
    u64 found_start;
    int found_level;
    unsigned long len;
    struct extent_buffer *eb;
    struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
    int ret = 0;

    tree = &BTRFS_I(page->mapping->host)->io_tree;
    if (page->private == EXTENT_PAGE_PRIVATE)
        goto out;
    if (!page->private)
        goto out;

    len = page->private >> 2;
    WARN_ON(len == 0);

    eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

    found_start = btrfs_header_bytenr(eb);
    if (found_start != start) {
        if (printk_ratelimit()) {
            printk(KERN_INFO "btrfs bad tree block start "
                   "%llu %llu\n",
                   (unsigned long long)found_start,
                   (unsigned long long)eb->start);
        }
        ret = -EIO;
        goto err;
    }
    if (eb->first_page != page) {
        printk(KERN_INFO "btrfs bad first page %lu %lu\n",
               eb->first_page->index, page->index);
        WARN_ON(1);
        ret = -EIO;
        goto err;
    }
    if (check_tree_block_fsid(root, eb)) {
        if (printk_ratelimit()) {
            printk(KERN_INFO "btrfs bad fsid on block %llu\n",
                   (unsigned long long)eb->start);
        }
        ret = -EIO;
        goto err;
    }
    found_level = btrfs_header_level(eb);

    btrfs_set_buffer_lockdep_class(eb, found_level);

    ret = csum_tree_block(root, eb, 1);
    if (ret)
        ret = -EIO;

    end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
    end = eb->start + end - 1;
err:
    free_extent_buffer(eb);
out:
    return ret;
}

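/*
 * bio end_io callback: defer the real completion work to the endio
 * worker threads, picking a queue based on whether this was a read
 * or a write and whether it carried data or metadata.
 */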
static void end_workqueue_bio(struct bio *bio, int err)
{
    struct end_io_wq *end_io_wq = bio->bi_private;
    struct btrfs_fs_info *fs_info;

    fs_info = end_io_wq->info;
    end_io_wq->error = err;
    end_io_wq->work.func = end_workqueue_fn;
    end_io_wq->work.flags = 0;

    if (bio->bi_rw & (1 << BIO_RW)) {
        if (end_io_wq->metadata)
            btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                       &end_io_wq->work);
        else
            btrfs_queue_worker(&fs_info->endio_write_workers,
                       &end_io_wq->work);
    } else {
        if (end_io_wq->metadata)
            btrfs_queue_worker(&fs_info->endio_meta_workers,
                       &end_io_wq->work);
        else
            btrfs_queue_worker(&fs_info->endio_workers,
                       &end_io_wq->work);
    }
}

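/*
 * hook a bio so that its end_io runs in task context: stash the
 * original bi_private/bi_end_io in an end_io_wq and point the bio at
 * end_workqueue_bio instead.
 */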
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
            int metadata)
{
    struct end_io_wq *end_io_wq;
    end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
    if (!end_io_wq)
        return -ENOMEM;

    end_io_wq->private = bio->bi_private;
    end_io_wq->end_io = bio->bi_end_io;
    end_io_wq->info = info;
    end_io_wq->error = 0;
    end_io_wq->bio = bio;
    end_io_wq->metadata = metadata;

    bio->bi_private = end_io_wq;
    bio->bi_end_io = end_workqueue_bio;
    return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
    unsigned long limit = min_t(unsigned long,
                    info->workers.max_workers,
                    info->fs_devices->open_devices);
    return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
    return atomic_read(&info->nr_async_bios) >
        btrfs_async_submit_limit(info);
}

static void run_one_async_start(struct btrfs_work *work)
{
    struct btrfs_fs_info *fs_info;
    struct async_submit_bio *async;

    async = container_of(work, struct async_submit_bio, work);
    fs_info = BTRFS_I(async->inode)->root->fs_info;
    async->submit_bio_start(async->inode, async->rw, async->bio,
                   async->mirror_num, async->bio_flags);
}

static void run_one_async_done(struct btrfs_work *work)
{
    struct btrfs_fs_info *fs_info;
    struct async_submit_bio *async;
    int limit;

    async = container_of(work, struct async_submit_bio, work);
    fs_info = BTRFS_I(async->inode)->root->fs_info;

    limit = btrfs_async_submit_limit(fs_info);
    limit = limit * 2 / 3;

    atomic_dec(&fs_info->nr_async_submits);

    if (atomic_read(&fs_info->nr_async_submits) < limit &&
        waitqueue_active(&fs_info->async_submit_wait))
        wake_up(&fs_info->async_submit_wait);

    async->submit_bio_done(async->inode, async->rw, async->bio,
                   async->mirror_num, async->bio_flags);
}

static void run_one_async_free(struct btrfs_work *work)
{
    struct async_submit_bio *async;

    async = container_of(work, struct async_submit_bio, work);
    kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
            int rw, struct bio *bio, int mirror_num,
            unsigned long bio_flags,
            extent_submit_bio_hook_t *submit_bio_start,
            extent_submit_bio_hook_t *submit_bio_done)
{
    struct async_submit_bio *async;

    async = kmalloc(sizeof(*async), GFP_NOFS);
    if (!async)
        return -ENOMEM;

    async->inode = inode;
    async->rw = rw;
    async->bio = bio;
    async->mirror_num = mirror_num;
    async->submit_bio_start = submit_bio_start;
    async->submit_bio_done = submit_bio_done;

    async->work.func = run_one_async_start;
    async->work.ordered_func = run_one_async_done;
    async->work.ordered_free = run_one_async_free;

    async->work.flags = 0;
    async->bio_flags = bio_flags;

    atomic_inc(&fs_info->nr_async_submits);

    if (rw & (1 << BIO_RW_SYNCIO))
        btrfs_set_work_high_prio(&async->work);

    btrfs_queue_worker(&fs_info->workers, &async->work);

    while (atomic_read(&fs_info->async_submit_draining) &&
          atomic_read(&fs_info->nr_async_submits)) {
        wait_event(fs_info->async_submit_wait,
               (atomic_read(&fs_info->nr_async_submits) == 0));
    }

    return 0;
}

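/*
 * checksum every metadata page attached to a write bio just before
 * it goes down to the device.
 */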
static int btree_csum_one_bio(struct bio *bio)
{
    struct bio_vec *bvec = bio->bi_io_vec;
    int bio_index = 0;
    struct btrfs_root *root;

    WARN_ON(bio->bi_vcnt <= 0);
    while (bio_index < bio->bi_vcnt) {
        root = BTRFS_I(bvec->bv_page->mapping->host)->root;
        csum_dirty_buffer(root, bvec->bv_page);
        bio_index++;
        bvec++;
    }
    return 0;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
                    struct bio *bio, int mirror_num,
                    unsigned long bio_flags)
{
    /*
     * when we're called for a write, we're already in the async
     * submission context. Just jump into btrfs_map_bio
     */
    btree_csum_one_bio(bio);
    return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                 int mirror_num, unsigned long bio_flags)
{
    /*
     * when we're called for a write, we're already in the async
     * submission context. Just jump into btrfs_map_bio
     */
    return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                 int mirror_num, unsigned long bio_flags)
{
    int ret;

    ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                      bio, 1);
    BUG_ON(ret);

    if (!(rw & (1 << BIO_RW))) {
        /*
         * called for a read, do the setup so that checksum validation
         * can happen in the async kernel threads
         */
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                     mirror_num, 0);
    }

    /*
     * kthread helpers are used to submit writes so that checksumming
     * can happen in parallel across all CPUs
     */
    return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                   inode, rw, bio, mirror_num, 0,
                   __btree_submit_bio_start,
                   __btree_submit_bio_done);
}

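/*
 * writepage for the btree inode. When called from memory reclaim
 * (PF_MEMALLOC) we don't write the page out; it is redirtied and the
 * dirty metadata accounting is kept consistent instead.
 */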
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
    struct extent_io_tree *tree;
    struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
    struct extent_buffer *eb;
    int was_dirty;

    tree = &BTRFS_I(page->mapping->host)->io_tree;
    if (!(current->flags & PF_MEMALLOC)) {
        return extent_write_full_page(tree, page,
                          btree_get_extent, wbc);
    }

    redirty_page_for_writepage(wbc, page);
    eb = btrfs_find_tree_block(root, page_offset(page),
                      PAGE_CACHE_SIZE);
    WARN_ON(!eb);

    was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
    if (!was_dirty) {
        spin_lock(&root->fs_info->delalloc_lock);
        root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
        spin_unlock(&root->fs_info->delalloc_lock);
    }
    free_extent_buffer(eb);

    unlock_page(page);
    return 0;
}

static int btree_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
    struct extent_io_tree *tree;
    tree = &BTRFS_I(mapping->host)->io_tree;
    if (wbc->sync_mode == WB_SYNC_NONE) {
        struct btrfs_root *root = BTRFS_I(mapping->host)->root;
        u64 num_dirty;
        unsigned long thresh = 32 * 1024 * 1024;

        if (wbc->for_kupdate)
            return 0;

        /* this is a bit racy, but that's ok */
        num_dirty = root->fs_info->dirty_metadata_bytes;
        if (num_dirty < thresh)
            return 0;
    }
    return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
    struct extent_io_tree *tree;
    tree = &BTRFS_I(page->mapping->host)->io_tree;
    return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
    struct extent_io_tree *tree;
    struct extent_map_tree *map;
    int ret;

    if (PageWriteback(page) || PageDirty(page))
        return 0;

    tree = &BTRFS_I(page->mapping->host)->io_tree;
    map = &BTRFS_I(page->mapping->host)->extent_tree;

    ret = try_release_extent_state(map, tree, page, gfp_flags);
    if (!ret)
        return 0;

    ret = try_release_extent_buffer(tree, page);
    if (ret == 1) {
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
    }

    return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
    struct extent_io_tree *tree;
    tree = &BTRFS_I(page->mapping->host)->io_tree;
    extent_invalidatepage(tree, page, offset);
    btree_releasepage(page, GFP_NOFS);
    if (PagePrivate(page)) {
        printk(KERN_WARNING "btrfs warning page private not zero "
               "on page %llu\n", (unsigned long long)page_offset(page));
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
    }
}

static const struct address_space_operations btree_aops = {
    .readpage = btree_readpage,
    .writepage = btree_writepage,
    .writepages = btree_writepages,
    .releasepage = btree_releasepage,
    .invalidatepage = btree_invalidatepage,
    .sync_page = block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
             u64 parent_transid)
{
    struct extent_buffer *buf = NULL;
    struct inode *btree_inode = root->fs_info->btree_inode;
    int ret = 0;

    buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
    if (!buf)
        return 0;
    read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                 buf, 0, 0, btree_get_extent, 0);
    free_extent_buffer(buf);
    return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                        u64 bytenr, u32 blocksize)
{
    struct inode *btree_inode = root->fs_info->btree_inode;
    struct extent_buffer *eb;
    eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                bytenr, blocksize, GFP_NOFS);
    return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                         u64 bytenr, u32 blocksize)
{
    struct inode *btree_inode = root->fs_info->btree_inode;
    struct extent_buffer *eb;

    eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                 bytenr, blocksize, NULL, GFP_NOFS);
    return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
    return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
                    buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
    return filemap_fdatawait_range(buf->first_page->mapping,
                       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                      u32 blocksize, u64 parent_transid)
{
    struct extent_buffer *buf = NULL;
    struct inode *btree_inode = root->fs_info->btree_inode;
    struct extent_io_tree *io_tree;
    int ret;

    io_tree = &BTRFS_I(btree_inode)->io_tree;

    buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
    if (!buf)
        return NULL;

    ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

    if (ret == 0)
        set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
    return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
             struct extent_buffer *buf)
{
    struct inode *btree_inode = root->fs_info->btree_inode;
    if (btrfs_header_generation(buf) ==
        root->fs_info->running_transaction->transid) {
        btrfs_assert_tree_locked(buf);

        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
            spin_lock(&root->fs_info->delalloc_lock);
            if (root->fs_info->dirty_metadata_bytes >= buf->len)
                root->fs_info->dirty_metadata_bytes -= buf->len;
            else
                WARN_ON(1);
            spin_unlock(&root->fs_info->delalloc_lock);
        }

        /* ugh, clear_extent_buffer_dirty needs to lock the page */
        btrfs_set_lock_blocking(buf);
        clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                      buf);
    }
    return 0;
}

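/*
 * initialize the in-memory fields of a btrfs_root to safe defaults.
 * No disk IO happens here; callers fill in root->node afterwards.
 */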
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
            u32 stripesize, struct btrfs_root *root,
            struct btrfs_fs_info *fs_info,
            u64 objectid)
{
    root->node = NULL;
    root->commit_root = NULL;
    root->sectorsize = sectorsize;
    root->nodesize = nodesize;
    root->leafsize = leafsize;
    root->stripesize = stripesize;
    root->ref_cows = 0;
    root->track_dirty = 0;
    root->in_radix = 0;
    root->clean_orphans = 0;

    root->fs_info = fs_info;
    root->objectid = objectid;
    root->last_trans = 0;
    root->highest_objectid = 0;
    root->name = NULL;
    root->in_sysfs = 0;
    root->inode_tree = RB_ROOT;

    INIT_LIST_HEAD(&root->dirty_list);
    INIT_LIST_HEAD(&root->orphan_list);
    INIT_LIST_HEAD(&root->root_list);
    spin_lock_init(&root->node_lock);
    spin_lock_init(&root->list_lock);
    spin_lock_init(&root->inode_lock);
    mutex_init(&root->objectid_mutex);
    mutex_init(&root->log_mutex);
    init_waitqueue_head(&root->log_writer_wait);
    init_waitqueue_head(&root->log_commit_wait[0]);
    init_waitqueue_head(&root->log_commit_wait[1]);
    atomic_set(&root->log_commit[0], 0);
    atomic_set(&root->log_commit[1], 0);
    atomic_set(&root->log_writers, 0);
    root->log_batch = 0;
    root->log_transid = 0;
    root->last_log_commit = 0;
    extent_io_tree_init(&root->dirty_log_pages,
                 fs_info->btree_inode->i_mapping, GFP_NOFS);

    memset(&root->root_key, 0, sizeof(root->root_key));
    memset(&root->root_item, 0, sizeof(root->root_item));
    memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
    memset(&root->root_kobj, 0, sizeof(root->root_kobj));
    root->defrag_trans_start = fs_info->generation;
    init_completion(&root->kobj_unregister);
    root->defrag_running = 0;
    root->root_key.objectid = objectid;
    root->anon_super.s_root = NULL;
    root->anon_super.s_dev = 0;
    INIT_LIST_HEAD(&root->anon_super.s_list);
    INIT_LIST_HEAD(&root->anon_super.s_instances);
    init_rwsem(&root->anon_super.s_umount);

    return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
                   struct btrfs_fs_info *fs_info,
                   u64 objectid,
                   struct btrfs_root *root)
{
    int ret;
    u32 blocksize;
    u64 generation;

    __setup_root(tree_root->nodesize, tree_root->leafsize,
             tree_root->sectorsize, tree_root->stripesize,
             root, fs_info, objectid);
    ret = btrfs_find_last_root(tree_root, objectid,
                   &root->root_item, &root->root_key);
    if (ret > 0)
        return -ENOENT;
    BUG_ON(ret);

    generation = btrfs_root_generation(&root->root_item);
    blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
    root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                     blocksize, generation);
    BUG_ON(!root->node);
    root->commit_root = btrfs_root_node(root);
    return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
                 struct btrfs_fs_info *fs_info)
{
    struct extent_buffer *eb;
    struct btrfs_root *log_root_tree = fs_info->log_root_tree;
    u64 start = 0;
    u64 end = 0;
    int ret;

    if (!log_root_tree)
        return 0;

    while (1) {
        ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
                0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
        if (ret)
            break;

        clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
                  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
    }
    eb = fs_info->log_root_tree->node;

    WARN_ON(btrfs_header_level(eb) != 0);
    WARN_ON(btrfs_header_nritems(eb) != 0);

    ret = btrfs_free_reserved_extent(fs_info->tree_root,
                eb->start, eb->len);
    BUG_ON(ret);

    free_extent_buffer(eb);
    kfree(fs_info->log_root_tree);
    fs_info->log_root_tree = NULL;
    return 0;
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                     struct btrfs_fs_info *fs_info)
{
    struct btrfs_root *root;
    struct btrfs_root *tree_root = fs_info->tree_root;
    struct extent_buffer *leaf;

    root = kzalloc(sizeof(*root), GFP_NOFS);
    if (!root)
        return ERR_PTR(-ENOMEM);

    __setup_root(tree_root->nodesize, tree_root->leafsize,
             tree_root->sectorsize, tree_root->stripesize,
             root, fs_info, BTRFS_TREE_LOG_OBJECTID);

    root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
    root->root_key.type = BTRFS_ROOT_ITEM_KEY;
    root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
    /*
     * log trees do not get reference counted because they go away
     * before a real commit is actually done. They do store pointers
     * to file data extents, and those reference counts still get
     * updated (along with back refs to the log tree).
     */
    root->ref_cows = 0;

    leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
    if (IS_ERR(leaf)) {
        kfree(root);
        return ERR_CAST(leaf);
    }

    memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
    btrfs_set_header_bytenr(leaf, leaf->start);
    btrfs_set_header_generation(leaf, trans->transid);
    btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
    btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
    root->node = leaf;

    write_extent_buffer(root->node, root->fs_info->fsid,
                (unsigned long)btrfs_header_fsid(root->node),
                BTRFS_FSID_SIZE);
    btrfs_mark_buffer_dirty(root->node);
    btrfs_tree_unlock(root->node);
    return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                 struct btrfs_fs_info *fs_info)
{
    struct btrfs_root *log_root;

    log_root = alloc_log_tree(trans, fs_info);
    if (IS_ERR(log_root))
        return PTR_ERR(log_root);
    WARN_ON(fs_info->log_root_tree);
    fs_info->log_root_tree = log_root;
    return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
               struct btrfs_root *root)
{
    struct btrfs_root *log_root;
    struct btrfs_inode_item *inode_item;

    log_root = alloc_log_tree(trans, root->fs_info);
    if (IS_ERR(log_root))
        return PTR_ERR(log_root);

    log_root->last_trans = trans->transid;
    log_root->root_key.offset = root->root_key.objectid;

    inode_item = &log_root->root_item.inode;
    inode_item->generation = cpu_to_le64(1);
    inode_item->size = cpu_to_le64(3);
    inode_item->nlink = cpu_to_le32(1);
    inode_item->nbytes = cpu_to_le64(root->leafsize);
    inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

    btrfs_set_root_node(&log_root->root_item, log_root->node);

    WARN_ON(root->log_root);
    root->log_root = log_root;
    root->log_transid = 0;
    root->last_log_commit = 0;
    return 0;
}

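/*
 * read a root from disk by key without inserting it into the fs_roots
 * radix tree. Callers that want the cached copy go through
 * btrfs_read_fs_root_no_name() instead.
 */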
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                           struct btrfs_key *location)
{
    struct btrfs_root *root;
    struct btrfs_fs_info *fs_info = tree_root->fs_info;
    struct btrfs_path *path;
    struct extent_buffer *l;
    u64 generation;
    u32 blocksize;
    int ret = 0;

    root = kzalloc(sizeof(*root), GFP_NOFS);
    if (!root)
        return ERR_PTR(-ENOMEM);
    if (location->offset == (u64)-1) {
        ret = find_and_setup_root(tree_root, fs_info,
                      location->objectid, root);
        if (ret) {
            kfree(root);
            return ERR_PTR(ret);
        }
        goto out;
    }

    __setup_root(tree_root->nodesize, tree_root->leafsize,
             tree_root->sectorsize, tree_root->stripesize,
             root, fs_info, location->objectid);

    path = btrfs_alloc_path();
    BUG_ON(!path);
    ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
    if (ret == 0) {
        l = path->nodes[0];
        read_extent_buffer(l, &root->root_item,
                btrfs_item_ptr_offset(l, path->slots[0]),
                sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
    }
    btrfs_free_path(path);
    if (ret) {
        if (ret > 0)
            ret = -ENOENT;
        return ERR_PTR(ret);
    }

    generation = btrfs_root_generation(&root->root_item);
    blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
    root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                     blocksize, generation);
    root->commit_root = btrfs_root_node(root);
    BUG_ON(!root->node);
out:
    if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
        root->ref_cows = 1;

    return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                    u64 root_objectid)
{
    struct btrfs_root *root;

    if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
        return fs_info->tree_root;
    if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
        return fs_info->extent_root;

    root = radix_tree_lookup(&fs_info->fs_roots_radix,
                 (unsigned long)root_objectid);
    return root;
}

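/*
 * look up a root by key, returning the cached copy from the radix
 * tree when possible and reading it from disk (then caching it)
 * otherwise. Roots with a pending orphan item come back as -ENOENT.
 */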
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                          struct btrfs_key *location)
{
    struct btrfs_root *root;
    int ret;

    if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
        return fs_info->tree_root;
    if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
        return fs_info->extent_root;
    if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
        return fs_info->chunk_root;
    if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
        return fs_info->dev_root;
    if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
        return fs_info->csum_root;
again:
    spin_lock(&fs_info->fs_roots_radix_lock);
    root = radix_tree_lookup(&fs_info->fs_roots_radix,
                 (unsigned long)location->objectid);
    spin_unlock(&fs_info->fs_roots_radix_lock);
    if (root)
        return root;

    ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
    if (ret == 0)
        ret = -ENOENT;
    if (ret < 0)
        return ERR_PTR(ret);

    root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
    if (IS_ERR(root))
        return root;

    WARN_ON(btrfs_root_refs(&root->root_item) == 0);
    set_anon_super(&root->anon_super, NULL);

    ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
    if (ret)
        goto fail;

    spin_lock(&fs_info->fs_roots_radix_lock);
    ret = radix_tree_insert(&fs_info->fs_roots_radix,
                (unsigned long)root->root_key.objectid,
                root);
    if (ret == 0) {
        root->in_radix = 1;
        root->clean_orphans = 1;
    }
    spin_unlock(&fs_info->fs_roots_radix_lock);
    radix_tree_preload_end();
    if (ret) {
        if (ret == -EEXIST) {
            free_fs_root(root);
            goto again;
        }
        goto fail;
    }

    ret = btrfs_find_dead_roots(fs_info->tree_root,
                    root->root_key.objectid);
    WARN_ON(ret);
    return root;
fail:
    free_fs_root(root);
    return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                      struct btrfs_key *location,
                      const char *name, int namelen)
{
    return btrfs_read_fs_root_no_name(fs_info, location);
#if 0
    struct btrfs_root *root;
    int ret;

    root = btrfs_read_fs_root_no_name(fs_info, location);
    if (!root)
        return NULL;

    if (root->in_sysfs)
        return root;

    ret = btrfs_set_root_name(root, name, namelen);
    if (ret) {
        free_extent_buffer(root->node);
        kfree(root);
        return ERR_PTR(ret);
    }

    ret = btrfs_sysfs_add_root(root);
    if (ret) {
        free_extent_buffer(root->node);
        kfree(root->name);
        kfree(root);
        return ERR_PTR(ret);
    }
    root->in_sysfs = 1;
    return root;
#endif
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
    struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
    int ret = 0;
    struct btrfs_device *device;
    struct backing_dev_info *bdi;

    list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
        if (!device->bdev)
            continue;
        bdi = blk_get_backing_dev_info(device->bdev);
        if (bdi && bdi_congested(bdi, bdi_bits)) {
            ret = 1;
            break;
        }
    }
    return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
    struct btrfs_device *device;
    struct btrfs_fs_info *info;

    info = (struct btrfs_fs_info *)bdi->unplug_io_data;
    list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
        if (!device->bdev)
            continue;

        bdi = blk_get_backing_dev_info(device->bdev);
        if (bdi->unplug_io_fn)
            bdi->unplug_io_fn(bdi, page);
    }
}

static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
    struct inode *inode;
    struct extent_map_tree *em_tree;
    struct extent_map *em;
    struct address_space *mapping;
    u64 offset;

    /* the generic O_DIRECT read code does this */
    if (1 || !page) {
        __unplug_io_fn(bdi, page);
        return;
    }

    /*
     * page->mapping may change at any time. Get a consistent copy
     * and use that for everything below
     */
    smp_mb();
    mapping = page->mapping;
    if (!mapping)
        return;

    inode = mapping->host;

    /*
     * don't do the expensive searching for a small number of
     * devices
     */
    if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
        __unplug_io_fn(bdi, page);
        return;
    }

    offset = page_offset(page);

    em_tree = &BTRFS_I(inode)->extent_tree;
    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
    read_unlock(&em_tree->lock);
    if (!em) {
        __unplug_io_fn(bdi, page);
        return;
    }

    if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
        free_extent_map(em);
        __unplug_io_fn(bdi, page);
        return;
    }
    offset = offset - em->start;
    btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
              em->block_start + offset, page);
    free_extent_map(em);
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
    int err;

    bdi->name = "btrfs";
    bdi->capabilities = BDI_CAP_MAP_COPY;
    err = bdi_init(bdi);
    if (err)
        return err;

    err = bdi_register(bdi, NULL, "btrfs-%d",
                atomic_inc_return(&btrfs_bdi_num));
    if (err) {
        bdi_destroy(bdi);
        return err;
    }

    bdi->ra_pages = default_backing_dev_info.ra_pages;
    bdi->unplug_io_fn = btrfs_unplug_io_fn;
    bdi->unplug_io_data = info;
    bdi->congested_fn = btrfs_congested_fn;
    bdi->congested_data = info;
    return 0;
}

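/*
 * tree blocks can span multiple pages. Before checksumming a read we
 * need every page of the block in ram and up to date, so check
 * whether the bio (plus what is already cached) covers the whole
 * extent buffer.
 */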
static int bio_ready_for_csum(struct bio *bio)
{
    u64 length = 0;
    u64 buf_len = 0;
    u64 start = 0;
    struct page *page;
    struct extent_io_tree *io_tree = NULL;
    struct btrfs_fs_info *info = NULL;
    struct bio_vec *bvec;
    int i;
    int ret;

    bio_for_each_segment(bvec, bio, i) {
        page = bvec->bv_page;
        if (page->private == EXTENT_PAGE_PRIVATE) {
            length += bvec->bv_len;
            continue;
        }
        if (!page->private) {
            length += bvec->bv_len;
            continue;
        }
        length = bvec->bv_len;
        buf_len = page->private >> 2;
        start = page_offset(page) + bvec->bv_offset;
        io_tree = &BTRFS_I(page->mapping->host)->io_tree;
        info = BTRFS_I(page->mapping->host)->root->fs_info;
    }
    /* are we fully contained in this bio? */
    if (buf_len <= length)
        return 1;

    ret = extent_range_uptodate(io_tree, start + length,
                    start + buf_len - 1);
    return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
    struct bio *bio;
    struct end_io_wq *end_io_wq;
    struct btrfs_fs_info *fs_info;
    int error;

    end_io_wq = container_of(work, struct end_io_wq, work);
    bio = end_io_wq->bio;
    fs_info = end_io_wq->info;

    /* metadata bio reads are special because the whole tree block must
     * be checksummed at once. This makes sure the entire block is in
     * ram and up to date before trying to verify things. For
     * blocksize <= pagesize, it is basically a noop
     */
    if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
        !bio_ready_for_csum(bio)) {
        btrfs_queue_worker(&fs_info->endio_meta_workers,
                   &end_io_wq->work);
        return;
    }
    error = end_io_wq->error;
    bio->bi_private = end_io_wq->private;
    bio->bi_end_io = end_io_wq->end_io;
    kfree(end_io_wq);
    bio_endio(bio, error);
}

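/*
 * background thread that runs delayed iputs and cleans up old,
 * deleted snapshots whenever it can grab the cleaner_mutex on a
 * writable filesystem.
 */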
static int cleaner_kthread(void *arg)
{
    struct btrfs_root *root = arg;

    do {
        smp_mb();
        if (root->fs_info->closing)
            break;

        vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);

        if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
            mutex_trylock(&root->fs_info->cleaner_mutex)) {
            btrfs_run_delayed_iputs(root);
            btrfs_clean_old_snapshots(root);
            mutex_unlock(&root->fs_info->cleaner_mutex);
        }

        if (freezing(current)) {
            refrigerator();
        } else {
            smp_mb();
            if (root->fs_info->closing)
                break;
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();
            __set_current_state(TASK_RUNNING);
        }
    } while (!kthread_should_stop());
    return 0;
}

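/*
 * background thread that commits the running transaction once it is
 * at least 30 seconds old, re-checking every 5 seconds while a
 * younger transaction is still open.
 */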
static int transaction_kthread(void *arg)
{
    struct btrfs_root *root = arg;
    struct btrfs_trans_handle *trans;
    struct btrfs_transaction *cur;
    unsigned long now;
    unsigned long delay;
    int ret;

    do {
        smp_mb();
        if (root->fs_info->closing)
            break;

        delay = HZ * 30;
        vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
        mutex_lock(&root->fs_info->transaction_kthread_mutex);

        mutex_lock(&root->fs_info->trans_mutex);
        cur = root->fs_info->running_transaction;
        if (!cur) {
            mutex_unlock(&root->fs_info->trans_mutex);
            goto sleep;
        }

        now = get_seconds();
        if (now < cur->start_time || now - cur->start_time < 30) {
            mutex_unlock(&root->fs_info->trans_mutex);
            delay = HZ * 5;
            goto sleep;
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);

sleep:
        wake_up_process(root->fs_info->cleaner_kthread);
        mutex_unlock(&root->fs_info->transaction_kthread_mutex);

        if (freezing(current)) {
            refrigerator();
        } else {
            if (root->fs_info->closing)
                break;
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(delay);
            __set_current_state(TASK_RUNNING);
        }
    } while (!kthread_should_stop());
    return 0;
}

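/*
 * open_ctree does the work of reading in the super block, setting up
 * the in-memory fs_info and tree roots, and starting the helper
 * threads needed to get the filesystem ready to mount.
 */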
1553struct btrfs_root *open_ctree(struct super_block *sb,
1554                  struct btrfs_fs_devices *fs_devices,
1555                  char *options)
1556{
1557    u32 sectorsize;
1558    u32 nodesize;
1559    u32 leafsize;
1560    u32 blocksize;
1561    u32 stripesize;
1562    u64 generation;
1563    u64 features;
1564    struct btrfs_key location;
1565    struct buffer_head *bh;
1566    struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1567                         GFP_NOFS);
1568    struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1569                         GFP_NOFS);
1570    struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1571                           GFP_NOFS);
1572    struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1573                        GFP_NOFS);
1574    struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1575                        GFP_NOFS);
1576    struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1577                          GFP_NOFS);
1578    struct btrfs_root *log_tree_root;
1579
1580    int ret;
1581    int err = -EINVAL;
1582
1583    struct btrfs_super_block *disk_super;
1584
1585    if (!extent_root || !tree_root || !fs_info ||
1586        !chunk_root || !dev_root || !csum_root) {
1587        err = -ENOMEM;
1588        goto fail;
1589    }
1590
1591    ret = init_srcu_struct(&fs_info->subvol_srcu);
1592    if (ret) {
1593        err = ret;
1594        goto fail;
1595    }
1596
1597    ret = setup_bdi(fs_info, &fs_info->bdi);
1598    if (ret) {
1599        err = ret;
1600        goto fail_srcu;
1601    }
1602
1603    fs_info->btree_inode = new_inode(sb);
1604    if (!fs_info->btree_inode) {
1605        err = -ENOMEM;
1606        goto fail_bdi;
1607    }
1608
1609    INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1610    INIT_LIST_HEAD(&fs_info->trans_list);
1611    INIT_LIST_HEAD(&fs_info->dead_roots);
1612    INIT_LIST_HEAD(&fs_info->delayed_iputs);
1613    INIT_LIST_HEAD(&fs_info->hashers);
1614    INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1615    INIT_LIST_HEAD(&fs_info->ordered_operations);
1616    INIT_LIST_HEAD(&fs_info->caching_block_groups);
1617    spin_lock_init(&fs_info->delalloc_lock);
1618    spin_lock_init(&fs_info->new_trans_lock);
1619    spin_lock_init(&fs_info->ref_cache_lock);
1620    spin_lock_init(&fs_info->fs_roots_radix_lock);
1621    spin_lock_init(&fs_info->delayed_iput_lock);
1622
1623    init_completion(&fs_info->kobj_unregister);
1624    fs_info->tree_root = tree_root;
1625    fs_info->extent_root = extent_root;
1626    fs_info->csum_root = csum_root;
1627    fs_info->chunk_root = chunk_root;
1628    fs_info->dev_root = dev_root;
1629    fs_info->fs_devices = fs_devices;
1630    INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1631    INIT_LIST_HEAD(&fs_info->space_info);
1632    btrfs_mapping_init(&fs_info->mapping_tree);
1633    atomic_set(&fs_info->nr_async_submits, 0);
1634    atomic_set(&fs_info->async_delalloc_pages, 0);
1635    atomic_set(&fs_info->async_submit_draining, 0);
1636    atomic_set(&fs_info->nr_async_bios, 0);
1637    fs_info->sb = sb;
1638    fs_info->max_inline = 8192 * 1024;
1639    fs_info->metadata_ratio = 0;
1640
1641    fs_info->thread_pool_size = min_t(unsigned long,
1642                      num_online_cpus() + 2, 8);
1643
1644    INIT_LIST_HEAD(&fs_info->ordered_extents);
1645    spin_lock_init(&fs_info->ordered_extent_lock);
1646
1647    sb->s_blocksize = 4096;
1648    sb->s_blocksize_bits = blksize_bits(4096);
1649    sb->s_bdi = &fs_info->bdi;
1650
1651    fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1652    fs_info->btree_inode->i_nlink = 1;
1653    /*
1654     * we set the i_size on the btree inode to the max possible int.
1655     * the real end of the address space is determined by all of
1656     * the devices in the system
1657     */
1658    fs_info->btree_inode->i_size = OFFSET_MAX;
1659    fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1660    fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1661
1662    RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1663    extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1664                 fs_info->btree_inode->i_mapping,
1665                 GFP_NOFS);
1666    extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1667                 GFP_NOFS);
1668
1669    BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1670
1671    BTRFS_I(fs_info->btree_inode)->root = tree_root;
1672    memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1673           sizeof(struct btrfs_key));
1674    BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1675    insert_inode_hash(fs_info->btree_inode);
1676
1677    spin_lock_init(&fs_info->block_group_cache_lock);
1678    fs_info->block_group_cache_tree = RB_ROOT;
1679
1680    extent_io_tree_init(&fs_info->freed_extents[0],
1681                 fs_info->btree_inode->i_mapping, GFP_NOFS);
1682    extent_io_tree_init(&fs_info->freed_extents[1],
1683                 fs_info->btree_inode->i_mapping, GFP_NOFS);
1684    fs_info->pinned_extents = &fs_info->freed_extents[0];
1685    fs_info->do_barriers = 1;
1686
1687
1688    mutex_init(&fs_info->trans_mutex);
1689    mutex_init(&fs_info->ordered_operations_mutex);
1690    mutex_init(&fs_info->tree_log_mutex);
1691    mutex_init(&fs_info->chunk_mutex);
1692    mutex_init(&fs_info->transaction_kthread_mutex);
1693    mutex_init(&fs_info->cleaner_mutex);
1694    mutex_init(&fs_info->volume_mutex);
1695    init_rwsem(&fs_info->extent_commit_sem);
1696    init_rwsem(&fs_info->cleanup_work_sem);
1697    init_rwsem(&fs_info->subvol_sem);
1698
1699    btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1700    btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1701
1702    init_waitqueue_head(&fs_info->transaction_throttle);
1703    init_waitqueue_head(&fs_info->transaction_wait);
1704    init_waitqueue_head(&fs_info->async_submit_wait);
1705
1706    __setup_root(4096, 4096, 4096, 4096, tree_root,
1707             fs_info, BTRFS_ROOT_TREE_OBJECTID);
1708
1709
1710    bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1711    if (!bh)
1712        goto fail_iput;
1713
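    /*
     * two copies of the super block are kept: super_copy is the live
     * working copy, and super_for_commit is refreshed from it at
     * commit time so a stable image is what goes to disk (it is the
     * copy write_all_supers below actually writes)
     */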
1714    memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1715    memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1716           sizeof(fs_info->super_for_commit));
1717    brelse(bh);
1718
1719    memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1720
1721    disk_super = &fs_info->super_copy;
1722    if (!btrfs_super_root(disk_super))
1723        goto fail_iput;
1724
1725    ret = btrfs_parse_options(tree_root, options);
1726    if (ret) {
1727        err = ret;
1728        goto fail_iput;
1729    }
1730
1731    features = btrfs_super_incompat_flags(disk_super) &
1732        ~BTRFS_FEATURE_INCOMPAT_SUPP;
1733    if (features) {
1734        printk(KERN_ERR "BTRFS: couldn't mount because of "
1735               "unsupported optional features (%Lx).\n",
1736               (unsigned long long)features);
1737        err = -EINVAL;
1738        goto fail_iput;
1739    }
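    /*
     * the incompat test above is a plain mask.  e.g. with a
     * hypothetical BTRFS_FEATURE_INCOMPAT_SUPP of 0x7:
     *   disk flags 0x5: 0x5 & ~0x7 == 0x0 -> mountable
     *   disk flags 0x9: 0x9 & ~0x7 == 0x8 -> unknown bit, refused
     */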
1740
1741    features = btrfs_super_incompat_flags(disk_super);
1742    if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
1743        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
1744        btrfs_set_super_incompat_flags(disk_super, features);
1745    }
1746
1747    features = btrfs_super_compat_ro_flags(disk_super) &
1748        ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1749    if (!(sb->s_flags & MS_RDONLY) && features) {
1750        printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1751               "unsupported option features (%Lx).\n",
1752               (unsigned long long)features);
1753        err = -EINVAL;
1754        goto fail_iput;
1755    }
1756
1757    btrfs_init_workers(&fs_info->generic_worker,
1758               "genwork", 1, NULL);
1759
1760    btrfs_init_workers(&fs_info->workers, "worker",
1761               fs_info->thread_pool_size,
1762               &fs_info->generic_worker);
1763
1764    btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1765               fs_info->thread_pool_size,
1766               &fs_info->generic_worker);
1767
1768    btrfs_init_workers(&fs_info->submit_workers, "submit",
1769               min_t(u64, fs_devices->num_devices,
1770               fs_info->thread_pool_size),
1771               &fs_info->generic_worker);
1772    btrfs_init_workers(&fs_info->enospc_workers, "enospc",
1773               fs_info->thread_pool_size,
1774               &fs_info->generic_worker);
1775
1776    /* a higher idle thresh on the submit workers makes it much more
1777     * likely that bios will be sent down in a sane order to the
1778     * devices
1779     */
1780    fs_info->submit_workers.idle_thresh = 64;
1781
1782    fs_info->workers.idle_thresh = 16;
1783    fs_info->workers.ordered = 1;
1784
1785    fs_info->delalloc_workers.idle_thresh = 2;
1786    fs_info->delalloc_workers.ordered = 1;
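    /*
     * ordered == 1 makes the ordered portion of each work item run in
     * the order the items were queued, so bios still go down in
     * submission order even though checksumming runs on many threads;
     * the low delalloc idle_thresh spreads small bursts of work
     * across threads quickly
     */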
1787
1788    btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
1789               &fs_info->generic_worker);
1790    btrfs_init_workers(&fs_info->endio_workers, "endio",
1791               fs_info->thread_pool_size,
1792               &fs_info->generic_worker);
1793    btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1794               fs_info->thread_pool_size,
1795               &fs_info->generic_worker);
1796    btrfs_init_workers(&fs_info->endio_meta_write_workers,
1797               "endio-meta-write", fs_info->thread_pool_size,
1798               &fs_info->generic_worker);
1799    btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1800               fs_info->thread_pool_size,
1801               &fs_info->generic_worker);
1802
1803    /*
1804     * endios are largely parallel and should have a very
1805     * low idle thresh
1806     */
1807    fs_info->endio_workers.idle_thresh = 4;
1808    fs_info->endio_meta_workers.idle_thresh = 4;
1809
1810    fs_info->endio_write_workers.idle_thresh = 2;
1811    fs_info->endio_meta_write_workers.idle_thresh = 2;
1812
1813    btrfs_start_workers(&fs_info->workers, 1);
1814    btrfs_start_workers(&fs_info->generic_worker, 1);
1815    btrfs_start_workers(&fs_info->submit_workers, 1);
1816    btrfs_start_workers(&fs_info->delalloc_workers, 1);
1817    btrfs_start_workers(&fs_info->fixup_workers, 1);
1818    btrfs_start_workers(&fs_info->endio_workers, 1);
1819    btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1820    btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1821    btrfs_start_workers(&fs_info->endio_write_workers, 1);
1822    btrfs_start_workers(&fs_info->enospc_workers, 1);
1823
1824    fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1825    fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1826                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
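    /*
     * readahead scales with the device count but never drops below
     * 4MB worth of pages: with 4k pages the floor is 1024 pages, so
     * even a single device with the common 32 page default ends up
     * at 1024
     */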
1827
1828    nodesize = btrfs_super_nodesize(disk_super);
1829    leafsize = btrfs_super_leafsize(disk_super);
1830    sectorsize = btrfs_super_sectorsize(disk_super);
1831    stripesize = btrfs_super_stripesize(disk_super);
1832    tree_root->nodesize = nodesize;
1833    tree_root->leafsize = leafsize;
1834    tree_root->sectorsize = sectorsize;
1835    tree_root->stripesize = stripesize;
1836
1837    sb->s_blocksize = sectorsize;
1838    sb->s_blocksize_bits = blksize_bits(sectorsize);
1839
1840    if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1841            sizeof(disk_super->magic))) {
1842        printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1843        goto fail_sb_buffer;
1844    }
1845
1846    mutex_lock(&fs_info->chunk_mutex);
1847    ret = btrfs_read_sys_array(tree_root);
1848    mutex_unlock(&fs_info->chunk_mutex);
1849    if (ret) {
1850        printk(KERN_WARNING "btrfs: failed to read the system "
1851               "array on %s\n", sb->s_id);
1852        goto fail_sb_buffer;
1853    }
1854
1855    blocksize = btrfs_level_size(tree_root,
1856                     btrfs_super_chunk_root_level(disk_super));
1857    generation = btrfs_super_chunk_root_generation(disk_super);
1858
1859    __setup_root(nodesize, leafsize, sectorsize, stripesize,
1860             chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1861
1862    chunk_root->node = read_tree_block(chunk_root,
1863                       btrfs_super_chunk_root(disk_super),
1864                       blocksize, generation);
1865    BUG_ON(!chunk_root->node);
1866    if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1867        printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1868               sb->s_id);
1869        goto fail_chunk_root;
1870    }
1871    btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1872    chunk_root->commit_root = btrfs_root_node(chunk_root);
1873
1874    read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1875       (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1876       BTRFS_UUID_SIZE);
1877
1878    mutex_lock(&fs_info->chunk_mutex);
1879    ret = btrfs_read_chunk_tree(chunk_root);
1880    mutex_unlock(&fs_info->chunk_mutex);
1881    if (ret) {
1882        printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1883               sb->s_id);
1884        goto fail_chunk_root;
1885    }
1886
1887    btrfs_close_extra_devices(fs_devices);
1888
1889    blocksize = btrfs_level_size(tree_root,
1890                     btrfs_super_root_level(disk_super));
1891    generation = btrfs_super_generation(disk_super);
1892
1893    tree_root->node = read_tree_block(tree_root,
1894                      btrfs_super_root(disk_super),
1895                      blocksize, generation);
1896    if (!tree_root->node)
1897        goto fail_chunk_root;
1898    if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
1899        printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
1900               sb->s_id);
1901        goto fail_tree_root;
1902    }
1903    btrfs_set_root_node(&tree_root->root_item, tree_root->node);
1904    tree_root->commit_root = btrfs_root_node(tree_root);
1905
1906    ret = find_and_setup_root(tree_root, fs_info,
1907                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1908    if (ret)
1909        goto fail_tree_root;
1910    extent_root->track_dirty = 1;
1911
1912    ret = find_and_setup_root(tree_root, fs_info,
1913                  BTRFS_DEV_TREE_OBJECTID, dev_root);
1914    if (ret)
1915        goto fail_extent_root;
1916    dev_root->track_dirty = 1;
1917
1918    ret = find_and_setup_root(tree_root, fs_info,
1919                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
1920    if (ret)
1921        goto fail_dev_root;
1922
1923    csum_root->track_dirty = 1;
1924
1925    ret = btrfs_read_block_groups(extent_root);
1926    if (ret) {
1927        printk(KERN_ERR "btrfs: failed to read block groups: %d\n", ret);
1928        goto fail_block_groups;
1929    }
1930
1931    fs_info->generation = generation;
1932    fs_info->last_trans_committed = generation;
1933    fs_info->data_alloc_profile = (u64)-1;
1934    fs_info->metadata_alloc_profile = (u64)-1;
1935    fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1936    fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1937                           "btrfs-cleaner");
1938    if (IS_ERR(fs_info->cleaner_kthread))
1939        goto fail_block_groups;
1940
1941    fs_info->transaction_kthread = kthread_run(transaction_kthread,
1942                           tree_root,
1943                           "btrfs-transaction");
1944    if (IS_ERR(fs_info->transaction_kthread))
1945        goto fail_cleaner;
1946
1947    if (!btrfs_test_opt(tree_root, SSD) &&
1948        !btrfs_test_opt(tree_root, NOSSD) &&
1949        !fs_info->fs_devices->rotating) {
1950        printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
1951               "mode\n");
1952        btrfs_set_opt(fs_info->mount_opt, SSD);
1953    }
1954
1955    if (btrfs_super_log_root(disk_super) != 0) {
1956        u64 bytenr = btrfs_super_log_root(disk_super);
1957
1958        if (fs_devices->rw_devices == 0) {
1959            printk(KERN_WARNING "Btrfs log replay required "
1960                   "on RO media\n");
1961            err = -EIO;
1962            goto fail_trans_kthread;
1963        }
1964        blocksize =
1965             btrfs_level_size(tree_root,
1966                      btrfs_super_log_root_level(disk_super));
1967
1968        log_tree_root = kzalloc(sizeof(struct btrfs_root),
1969                              GFP_NOFS);
        if (!log_tree_root) {
            err = -ENOMEM;
            goto fail_trans_kthread;
        }
1970
1971        __setup_root(nodesize, leafsize, sectorsize, stripesize,
1972                 log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1973
1974        log_tree_root->node = read_tree_block(tree_root, bytenr,
1975                              blocksize,
1976                              generation + 1);
1977        ret = btrfs_recover_log_trees(log_tree_root);
1978        BUG_ON(ret);
1979
1980        if (sb->s_flags & MS_RDONLY) {
1981            ret = btrfs_commit_super(tree_root);
1982            BUG_ON(ret);
1983        }
1984    }
1985
1986    ret = btrfs_find_orphan_roots(tree_root);
1987    BUG_ON(ret);
1988
1989    if (!(sb->s_flags & MS_RDONLY)) {
1990        ret = btrfs_recover_relocation(tree_root);
1991        if (ret < 0) {
1992            printk(KERN_WARNING
1993                   "btrfs: failed to recover relocation\n");
1994            err = -EINVAL;
1995            goto fail_trans_kthread;
1996        }
1997    }
1998
1999    location.objectid = BTRFS_FS_TREE_OBJECTID;
2000    location.type = BTRFS_ROOT_ITEM_KEY;
2001    location.offset = (u64)-1;
2002
2003    fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2004    if (!fs_info->fs_root)
2005        goto fail_trans_kthread;
2006
2007    if (!(sb->s_flags & MS_RDONLY)) {
2008        down_read(&fs_info->cleanup_work_sem);
2009        btrfs_orphan_cleanup(fs_info->fs_root);
2010        up_read(&fs_info->cleanup_work_sem);
2011    }
2012
2013    return tree_root;
2014
2015fail_trans_kthread:
2016    kthread_stop(fs_info->transaction_kthread);
2017fail_cleaner:
2018    kthread_stop(fs_info->cleaner_kthread);
2019
2020    /*
2021     * make sure we're done with the btree inode before we stop our
2022     * kthreads
2023     */
2024    filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2025    invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2026
2027fail_block_groups:
2028    btrfs_free_block_groups(fs_info);
2029    free_extent_buffer(csum_root->node);
2030    free_extent_buffer(csum_root->commit_root);
2031fail_dev_root:
2032    free_extent_buffer(dev_root->node);
2033    free_extent_buffer(dev_root->commit_root);
2034fail_extent_root:
2035    free_extent_buffer(extent_root->node);
2036    free_extent_buffer(extent_root->commit_root);
2037fail_tree_root:
2038    free_extent_buffer(tree_root->node);
2039    free_extent_buffer(tree_root->commit_root);
2040fail_chunk_root:
2041    free_extent_buffer(chunk_root->node);
2042    free_extent_buffer(chunk_root->commit_root);
2043fail_sb_buffer:
2044    btrfs_stop_workers(&fs_info->generic_worker);
2045    btrfs_stop_workers(&fs_info->fixup_workers);
2046    btrfs_stop_workers(&fs_info->delalloc_workers);
2047    btrfs_stop_workers(&fs_info->workers);
2048    btrfs_stop_workers(&fs_info->endio_workers);
2049    btrfs_stop_workers(&fs_info->endio_meta_workers);
2050    btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2051    btrfs_stop_workers(&fs_info->endio_write_workers);
2052    btrfs_stop_workers(&fs_info->submit_workers);
2053    btrfs_stop_workers(&fs_info->enospc_workers);
2054fail_iput:
2055    invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2056    iput(fs_info->btree_inode);
2057
2058    btrfs_close_devices(fs_info->fs_devices);
2059    btrfs_mapping_tree_free(&fs_info->mapping_tree);
2060fail_bdi:
2061    bdi_destroy(&fs_info->bdi);
2062fail_srcu:
2063    cleanup_srcu_struct(&fs_info->subvol_srcu);
2064fail:
2065    kfree(extent_root);
2066    kfree(tree_root);
2067    kfree(fs_info);
2068    kfree(chunk_root);
2069    kfree(dev_root);
2070    kfree(csum_root);
2071    return ERR_PTR(err);
2072}
2073
2074static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2075{
2076    char b[BDEVNAME_SIZE];
2077
2078    if (uptodate) {
2079        set_buffer_uptodate(bh);
2080    } else {
2081        if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
2082            printk(KERN_WARNING "lost page write due to "
2083                    "I/O error on %s\n",
2084                       bdevname(bh->b_bdev, b));
2085        }
2086         /* note, we don't set_buffer_write_io_error because we have
2087         * our own ways of dealing with the IO errors
2088         */
2089        clear_buffer_uptodate(bh);
2090    }
2091    unlock_buffer(bh);
2092    put_bh(bh);
2093}
2094
2095struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2096{
2097    struct buffer_head *bh;
2098    struct buffer_head *latest = NULL;
2099    struct btrfs_super_block *super;
2100    int i;
2101    u64 transid = 0;
2102    u64 bytenr;
2103
2104    /* we would like to check all the supers, but that would make
2105     * a btrfs mount succeed after a mkfs from a different FS.
2106     * So, we need to add a special mount option to scan for
2107     * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2108     */
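    /* with the current btrfs_sb_offset() layout the mirrors sit at
     * 64k, 64M and 256G, so the i < 1 bound below reads only the
     * primary copy at 64k
     */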
2109    for (i = 0; i < 1; i++) {
2110        bytenr = btrfs_sb_offset(i);
2111        if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2112            break;
2113        bh = __bread(bdev, bytenr / 4096, 4096);
2114        if (!bh)
2115            continue;
2116
2117        super = (struct btrfs_super_block *)bh->b_data;
2118        if (btrfs_super_bytenr(super) != bytenr ||
2119            strncmp((char *)(&super->magic), BTRFS_MAGIC,
2120                sizeof(super->magic))) {
2121            brelse(bh);
2122            continue;
2123        }
2124
2125        if (!latest || btrfs_super_generation(super) > transid) {
2126            brelse(latest);
2127            latest = bh;
2128            transid = btrfs_super_generation(super);
2129        } else {
2130            brelse(bh);
2131        }
2132    }
2133    return latest;
2134}
2135
2136/*
2137 * this should be called twice, once with wait == 0 and
2138 * once with wait == 1. When wait == 0 is done, all the buffer heads
2139 * we write are pinned.
2140 *
2141 * They are released when wait == 1 is done.
2142 * max_mirrors must be the same for both runs, and it indicates how
2143 * many supers on this one device should be written.
2144 *
2145 * max_mirrors == 0 means to write them all.
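 *
 * the caller pattern (see write_all_supers below) is:
 *
 *   for each device: write_dev_supers(dev, sb, barriers, 0, n);
 *   for each device: write_dev_supers(dev, sb, barriers, 1, n);
 *
 * pass one submits the IO and pins the buffer heads, pass two waits
 * for them and drops both references.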
2146 */
2147static int write_dev_supers(struct btrfs_device *device,
2148                struct btrfs_super_block *sb,
2149                int do_barriers, int wait, int max_mirrors)
2150{
2151    struct buffer_head *bh;
2152    int i;
2153    int ret;
2154    int errors = 0;
2155    u32 crc;
2156    u64 bytenr;
2157    int last_barrier = 0;
2158
2159    if (max_mirrors == 0)
2160        max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2161
2162    /* make sure only the last submit_bh does a barrier */
2163    if (do_barriers) {
2164        for (i = 0; i < max_mirrors; i++) {
2165            bytenr = btrfs_sb_offset(i);
2166            if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2167                device->total_bytes)
2168                break;
2169            last_barrier = i;
2170        }
2171    }
2172
2173    for (i = 0; i < max_mirrors; i++) {
2174        bytenr = btrfs_sb_offset(i);
2175        if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2176            break;
2177
2178        if (wait) {
2179            bh = __find_get_block(device->bdev, bytenr / 4096,
2180                          BTRFS_SUPER_INFO_SIZE);
2181            BUG_ON(!bh);
2182            wait_on_buffer(bh);
2183            if (!buffer_uptodate(bh))
2184                errors++;
2185
2186            /* drop our reference */
2187            brelse(bh);
2188
2189            /* drop the reference from the wait == 0 run */
2190            brelse(bh);
2191            continue;
2192        } else {
2193            btrfs_set_super_bytenr(sb, bytenr);
2194
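            /* the crc32c below covers the whole super block after
             * the csum field itself
             */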
2195            crc = ~(u32)0;
2196            crc = btrfs_csum_data(NULL, (char *)sb +
2197                          BTRFS_CSUM_SIZE, crc,
2198                          BTRFS_SUPER_INFO_SIZE -
2199                          BTRFS_CSUM_SIZE);
2200            btrfs_csum_final(crc, sb->csum);
2201
2202            /*
2203             * one reference for us; the wait == 1 pass above
2204             * drops it
2205             */
2206            bh = __getblk(device->bdev, bytenr / 4096,
2207                      BTRFS_SUPER_INFO_SIZE);
2208            memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2209
2210            /* one reference for submit_bh */
2211            get_bh(bh);
2212
2213            set_buffer_uptodate(bh);
2214            lock_buffer(bh);
2215            bh->b_end_io = btrfs_end_buffer_write_sync;
2216        }
2217
2218        if (i == last_barrier && do_barriers && device->barriers) {
2219            ret = submit_bh(WRITE_BARRIER, bh);
2220            if (ret == -EOPNOTSUPP) {
2221                printk(KERN_INFO "btrfs: disabling barriers on dev %s\n",
2222                       device->name);
2223                set_buffer_uptodate(bh);
2224                device->barriers = 0;
2225                /* one reference for submit_bh */
2226                get_bh(bh);
2227                lock_buffer(bh);
2228                ret = submit_bh(WRITE_SYNC, bh);
2229            }
2230        } else {
2231            ret = submit_bh(WRITE_SYNC, bh);
2232        }
2233
2234        if (ret)
2235            errors++;
2236    }
2237    return errors < i ? 0 : -1;
2238}
2239
2240int write_all_supers(struct btrfs_root *root, int max_mirrors)
2241{
2242    struct list_head *head;
2243    struct btrfs_device *dev;
2244    struct btrfs_super_block *sb;
2245    struct btrfs_dev_item *dev_item;
2246    int ret;
2247    int do_barriers;
2248    int max_errors;
2249    int total_errors = 0;
2250    u64 flags;
2251
2252    max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2253    do_barriers = !btrfs_test_opt(root, NOBARRIER);
2254
2255    sb = &root->fs_info->super_for_commit;
2256    dev_item = &sb->dev_item;
2257
2258    mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2259    head = &root->fs_info->fs_devices->devices;
2260    list_for_each_entry(dev, head, dev_list) {
2261        if (!dev->bdev) {
2262            total_errors++;
2263            continue;
2264        }
2265        if (!dev->in_fs_metadata || !dev->writeable)
2266            continue;
2267
2268        btrfs_set_stack_device_generation(dev_item, 0);
2269        btrfs_set_stack_device_type(dev_item, dev->type);
2270        btrfs_set_stack_device_id(dev_item, dev->devid);
2271        btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2272        btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2273        btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2274        btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2275        btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2276        memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2277        memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2278
2279        flags = btrfs_super_flags(sb);
2280        btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2281
2282        ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2283        if (ret)
2284            total_errors++;
2285    }
2286    if (total_errors > max_errors) {
2287        printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2288               total_errors);
2289        BUG();
2290    }
2291
2292    total_errors = 0;
2293    list_for_each_entry(dev, head, dev_list) {
2294        if (!dev->bdev)
2295            continue;
2296        if (!dev->in_fs_metadata || !dev->writeable)
2297            continue;
2298
2299        ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2300        if (ret)
2301            total_errors++;
2302    }
2303    mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2304    if (total_errors > max_errors) {
2305        printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2306               total_errors);
2307        BUG();
2308    }
2309    return 0;
2310}
2311
2312int write_ctree_super(struct btrfs_trans_handle *trans,
2313              struct btrfs_root *root, int max_mirrors)
2314{
2315    int ret;
2316
2317    ret = write_all_supers(root, max_mirrors);
2318    return ret;
2319}
2320
2321int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2322{
2323    spin_lock(&fs_info->fs_roots_radix_lock);
2324    radix_tree_delete(&fs_info->fs_roots_radix,
2325              (unsigned long)root->root_key.objectid);
2326    spin_unlock(&fs_info->fs_roots_radix_lock);
2327
2328    if (btrfs_root_refs(&root->root_item) == 0)
2329        synchronize_srcu(&fs_info->subvol_srcu);
2330
2331    free_fs_root(root);
2332    return 0;
2333}
2334
2335static void free_fs_root(struct btrfs_root *root)
2336{
2337    WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2338    if (root->anon_super.s_dev) {
2339        down_write(&root->anon_super.s_umount);
2340        kill_anon_super(&root->anon_super);
2341    }
2342    free_extent_buffer(root->node);
2343    free_extent_buffer(root->commit_root);
2344    kfree(root->name);
2345    kfree(root);
2346}
2347
2348static int del_fs_roots(struct btrfs_fs_info *fs_info)
2349{
2350    int ret;
2351    struct btrfs_root *gang[8];
2352    int i;
2353
2354    while (!list_empty(&fs_info->dead_roots)) {
2355        gang[0] = list_entry(fs_info->dead_roots.next,
2356                     struct btrfs_root, root_list);
2357        list_del(&gang[0]->root_list);
2358
2359        if (gang[0]->in_radix) {
2360            btrfs_free_fs_root(fs_info, gang[0]);
2361        } else {
2362            free_extent_buffer(gang[0]->node);
2363            free_extent_buffer(gang[0]->commit_root);
2364            kfree(gang[0]);
2365        }
2366    }
2367
2368    while (1) {
2369        ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2370                         (void **)gang, 0,
2371                         ARRAY_SIZE(gang));
2372        if (!ret)
2373            break;
2374        for (i = 0; i < ret; i++)
2375            btrfs_free_fs_root(fs_info, gang[i]);
2376    }
2377    return 0;
2378}
2379
2380int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2381{
2382    u64 root_objectid = 0;
2383    struct btrfs_root *gang[8];
2384    int i;
2385    int ret;
2386
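    /* walk the radix tree in batches of up to 8 roots; each pass
     * restarts the lookup just past the last root it handled
     */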
2387    while (1) {
2388        ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2389                         (void **)gang, root_objectid,
2390                         ARRAY_SIZE(gang));
2391        if (!ret)
2392            break;
2393
2394        root_objectid = gang[ret - 1]->root_key.objectid + 1;
2395        for (i = 0; i < ret; i++) {
2396            root_objectid = gang[i]->root_key.objectid;
2397            btrfs_orphan_cleanup(gang[i]);
2398        }
2399        root_objectid++;
2400    }
2401    return 0;
2402}
2403
2404int btrfs_commit_super(struct btrfs_root *root)
2405{
2406    struct btrfs_trans_handle *trans;
2407    int ret;
2408
2409    mutex_lock(&root->fs_info->cleaner_mutex);
2410    btrfs_run_delayed_iputs(root);
2411    btrfs_clean_old_snapshots(root);
2412    mutex_unlock(&root->fs_info->cleaner_mutex);
2413
2414    /* wait until ongoing cleanup work is done */
2415    down_write(&root->fs_info->cleanup_work_sem);
2416    up_write(&root->fs_info->cleanup_work_sem);
2417
2418    trans = btrfs_start_transaction(root, 1);
2419    ret = btrfs_commit_transaction(trans, root);
2420    BUG_ON(ret);
2421    /* run commit again to drop the original snapshot */
2422    trans = btrfs_start_transaction(root, 1);
2423    btrfs_commit_transaction(trans, root);
2424    ret = btrfs_write_and_wait_transaction(NULL, root);
2425    BUG_ON(ret);
2426
2427    ret = write_ctree_super(NULL, root, 0);
2428    return ret;
2429}
2430
2431int close_ctree(struct btrfs_root *root)
2432{
2433    struct btrfs_fs_info *fs_info = root->fs_info;
2434    int ret;
2435
2436    fs_info->closing = 1;
2437    smp_mb();
2438
2439    kthread_stop(fs_info->transaction_kthread);
2440    kthread_stop(fs_info->cleaner_kthread);
2441
2442    if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2443        ret = btrfs_commit_super(root);
2444        if (ret)
2445            printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2446    }
2447
2448    fs_info->closing = 2;
2449    smp_mb();
2450
2451    if (fs_info->delalloc_bytes) {
2452        printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2453               (unsigned long long)fs_info->delalloc_bytes);
2454    }
2455    if (fs_info->total_ref_cache_size) {
2456        printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
2457               (unsigned long long)fs_info->total_ref_cache_size);
2458    }
2459
2460    free_extent_buffer(fs_info->extent_root->node);
2461    free_extent_buffer(fs_info->extent_root->commit_root);
2462    free_extent_buffer(fs_info->tree_root->node);
2463    free_extent_buffer(fs_info->tree_root->commit_root);
2464    free_extent_buffer(fs_info->chunk_root->node);
2465    free_extent_buffer(fs_info->chunk_root->commit_root);
2466    free_extent_buffer(fs_info->dev_root->node);
2467    free_extent_buffer(fs_info->dev_root->commit_root);
2468    free_extent_buffer(fs_info->csum_root->node);
2469    free_extent_buffer(fs_info->csum_root->commit_root);
2470
2471    btrfs_free_block_groups(fs_info);
2472
2473    del_fs_roots(fs_info);
2474
2475    iput(fs_info->btree_inode);
2476
2477    btrfs_stop_workers(&fs_info->generic_worker);
2478    btrfs_stop_workers(&fs_info->fixup_workers);
2479    btrfs_stop_workers(&fs_info->delalloc_workers);
2480    btrfs_stop_workers(&fs_info->workers);
2481    btrfs_stop_workers(&fs_info->endio_workers);
2482    btrfs_stop_workers(&fs_info->endio_meta_workers);
2483    btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2484    btrfs_stop_workers(&fs_info->endio_write_workers);
2485    btrfs_stop_workers(&fs_info->submit_workers);
2486    btrfs_stop_workers(&fs_info->enospc_workers);
2487
2488    btrfs_close_devices(fs_info->fs_devices);
2489    btrfs_mapping_tree_free(&fs_info->mapping_tree);
2490
2491    bdi_destroy(&fs_info->bdi);
2492    cleanup_srcu_struct(&fs_info->subvol_srcu);
2493
2494    kfree(fs_info->extent_root);
2495    kfree(fs_info->tree_root);
2496    kfree(fs_info->chunk_root);
2497    kfree(fs_info->dev_root);
2498    kfree(fs_info->csum_root);
2499    return 0;
2500}
2501
2502int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2503{
2504    int ret;
2505    struct inode *btree_inode = buf->first_page->mapping->host;
2506
2507    ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
2508                     NULL);
2509    if (!ret)
2510        return ret;
2511
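    /* verify_parent_transid() returns 0 on a generation match, so
     * invert the result to keep this helper's 1 == uptodate
     * convention
     */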
2512    ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2513                    parent_transid);
2514    return !ret;
2515}
2516
2517int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2518{
2519    struct inode *btree_inode = buf->first_page->mapping->host;
2520    return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2521                      buf);
2522}
2523
2524void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2525{
2526    struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2527    u64 transid = btrfs_header_generation(buf);
2528    struct inode *btree_inode = root->fs_info->btree_inode;
2529    int was_dirty;
2530
2531    btrfs_assert_tree_locked(buf);
2532    if (transid != root->fs_info->generation) {
2533        printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2534               "found %llu running %llu\n",
2535            (unsigned long long)buf->start,
2536            (unsigned long long)transid,
2537            (unsigned long long)root->fs_info->generation);
2538        WARN_ON(1);
2539    }
2540    was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2541                        buf);
2542    if (!was_dirty) {
2543        spin_lock(&root->fs_info->delalloc_lock);
2544        root->fs_info->dirty_metadata_bytes += buf->len;
2545        spin_unlock(&root->fs_info->delalloc_lock);
2546    }
2547}
2548
2549void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2550{
2551    /*
2552     * looks as though older kernels can get into trouble with
2553     * this code; they end up stuck in balance_dirty_pages forever
2554     */
2555    u64 num_dirty;
2556    unsigned long thresh = 32 * 1024 * 1024;
2557
2558    if (current->flags & PF_MEMALLOC)
2559        return;
2560
2561    num_dirty = root->fs_info->dirty_metadata_bytes;
2562
2563    if (num_dirty > thresh) {
2564        balance_dirty_pages_ratelimited_nr(
2565                   root->fs_info->btree_inode->i_mapping, 1);
2566    }
2567    return;
2568}
2569
2570int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2571{
2572    struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2573    int ret;
2574    ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2575    if (ret == 0)
2576        set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2577    return ret;
2578}
2579
2580int btree_lock_page_hook(struct page *page)
2581{
2582    struct inode *inode = page->mapping->host;
2583    struct btrfs_root *root = BTRFS_I(inode)->root;
2584    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2585    struct extent_buffer *eb;
2586    unsigned long len;
2587    u64 bytenr = page_offset(page);
2588
2589    if (page->private == EXTENT_PAGE_PRIVATE)
2590        goto out;
2591
2592    len = page->private >> 2;
2593    eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2594    if (!eb)
2595        goto out;
2596
2597    btrfs_tree_lock(eb);
2598    btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2599
2600    if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2601        spin_lock(&root->fs_info->delalloc_lock);
2602        if (root->fs_info->dirty_metadata_bytes >= eb->len)
2603            root->fs_info->dirty_metadata_bytes -= eb->len;
2604        else
2605            WARN_ON(1);
2606        spin_unlock(&root->fs_info->delalloc_lock);
2607    }
2608
2609    btrfs_tree_unlock(eb);
2610    free_extent_buffer(eb);
2611out:
2612    lock_page(page);
2613    return 0;
2614}
2615
2616static struct extent_io_ops btree_extent_io_ops = {
2617    .write_cache_pages_lock_hook = btree_lock_page_hook,
2618    .readpage_end_io_hook = btree_readpage_end_io_hook,
2619    .submit_bio_hook = btree_submit_bio_hook,
2620    /* note we're sharing with inode.c for the merge bio hook */
2621    .merge_bio_hook = btrfs_merge_bio_hook,
2622};
2623
