fs/btrfs/inode.c

1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/bio.h>
21#include <linux/buffer_head.h>
22#include <linux/file.h>
23#include <linux/fs.h>
24#include <linux/pagemap.h>
25#include <linux/highmem.h>
26#include <linux/time.h>
27#include <linux/init.h>
28#include <linux/string.h>
29#include <linux/backing-dev.h>
30#include <linux/mpage.h>
31#include <linux/swap.h>
32#include <linux/writeback.h>
33#include <linux/statfs.h>
34#include <linux/compat.h>
35#include <linux/bit_spinlock.h>
36#include <linux/xattr.h>
37#include <linux/posix_acl.h>
38#include <linux/falloc.h>
39#include <linux/slab.h>
40#include "compat.h"
41#include "ctree.h"
42#include "disk-io.h"
43#include "transaction.h"
44#include "btrfs_inode.h"
45#include "ioctl.h"
46#include "print-tree.h"
47#include "volumes.h"
48#include "ordered-data.h"
49#include "xattr.h"
50#include "tree-log.h"
51#include "compression.h"
52#include "locking.h"
53
54struct btrfs_iget_args {
55    u64 ino;
56    struct btrfs_root *root;
57};
58
59static const struct inode_operations btrfs_dir_inode_operations;
60static const struct inode_operations btrfs_symlink_inode_operations;
61static const struct inode_operations btrfs_dir_ro_inode_operations;
62static const struct inode_operations btrfs_special_inode_operations;
63static const struct inode_operations btrfs_file_inode_operations;
64static const struct address_space_operations btrfs_aops;
65static const struct address_space_operations btrfs_symlink_aops;
66static const struct file_operations btrfs_dir_file_operations;
67static struct extent_io_ops btrfs_extent_io_ops;
68
69static struct kmem_cache *btrfs_inode_cachep;
70struct kmem_cache *btrfs_trans_handle_cachep;
71struct kmem_cache *btrfs_transaction_cachep;
72struct kmem_cache *btrfs_path_cachep;
73
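/*
 * map in-memory S_IFMT mode bits to the BTRFS_FT_* file type values
 * stored in directory items
 */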
74#define S_SHIFT 12
75static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
76    [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
77    [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
78    [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
79    [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
80    [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
81    [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
82    [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
83};
84
85static void btrfs_truncate(struct inode *inode);
86static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
87static noinline int cow_file_range(struct inode *inode,
88                   struct page *locked_page,
89                   u64 start, u64 end, int *page_started,
90                   unsigned long *nr_written, int unlock);
91
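/*
 * set up the ACLs and the security xattr for a freshly created inode,
 * inheriting defaults from the parent directory
 */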
92static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
93                     struct inode *inode, struct inode *dir)
94{
95    int err;
96
97    err = btrfs_init_acl(trans, inode, dir);
98    if (!err)
99        err = btrfs_xattr_security_init(trans, inode, dir);
100    return err;
101}
102
103/*
104 * this does all the hard work for inserting an inline extent into
105 * the btree. The caller should have done a btrfs_drop_extents so that
106 * no overlapping inline items exist in the btree
107 */
108static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
109                struct btrfs_root *root, struct inode *inode,
110                u64 start, size_t size, size_t compressed_size,
111                struct page **compressed_pages)
112{
113    struct btrfs_key key;
114    struct btrfs_path *path;
115    struct extent_buffer *leaf;
116    struct page *page = NULL;
117    char *kaddr;
118    unsigned long ptr;
119    struct btrfs_file_extent_item *ei;
120    int err = 0;
121    int ret;
122    size_t cur_size = size;
123    size_t datasize;
124    unsigned long offset;
125    int use_compress = 0;
126
127    if (compressed_size && compressed_pages) {
128        use_compress = 1;
129        cur_size = compressed_size;
130    }
131
132    path = btrfs_alloc_path();
133    if (!path)
134        return -ENOMEM;
135
136    path->leave_spinning = 1;
137    btrfs_set_trans_block_group(trans, inode);
138
139    key.objectid = inode->i_ino;
140    key.offset = start;
141    btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
142    datasize = btrfs_file_extent_calc_inline_size(cur_size);
143
144    inode_add_bytes(inode, size);
145    ret = btrfs_insert_empty_item(trans, root, path, &key,
146                      datasize);
147    BUG_ON(ret);
148    if (ret) {
149        err = ret;
150        goto fail;
151    }
152    leaf = path->nodes[0];
153    ei = btrfs_item_ptr(leaf, path->slots[0],
154                struct btrfs_file_extent_item);
155    btrfs_set_file_extent_generation(leaf, ei, trans->transid);
156    btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
157    btrfs_set_file_extent_encryption(leaf, ei, 0);
158    btrfs_set_file_extent_other_encoding(leaf, ei, 0);
159    btrfs_set_file_extent_ram_bytes(leaf, ei, size);
160    ptr = btrfs_file_extent_inline_start(ei);
161
162    if (use_compress) {
163        struct page *cpage;
164        int i = 0;
165        while (compressed_size > 0) {
166            cpage = compressed_pages[i];
167            cur_size = min_t(unsigned long, compressed_size,
168                       PAGE_CACHE_SIZE);
169
170            kaddr = kmap_atomic(cpage, KM_USER0);
171            write_extent_buffer(leaf, kaddr, ptr, cur_size);
172            kunmap_atomic(kaddr, KM_USER0);
173
174            i++;
175            ptr += cur_size;
176            compressed_size -= cur_size;
177        }
178        btrfs_set_file_extent_compression(leaf, ei,
179                          BTRFS_COMPRESS_ZLIB);
180    } else {
181        page = find_get_page(inode->i_mapping,
182                     start >> PAGE_CACHE_SHIFT);
183        btrfs_set_file_extent_compression(leaf, ei, 0);
184        kaddr = kmap_atomic(page, KM_USER0);
185        offset = start & (PAGE_CACHE_SIZE - 1);
186        write_extent_buffer(leaf, kaddr + offset, ptr, size);
187        kunmap_atomic(kaddr, KM_USER0);
188        page_cache_release(page);
189    }
190    btrfs_mark_buffer_dirty(leaf);
191    btrfs_free_path(path);
192
193    /*
194     * we're an inline extent, so nobody can
195     * extend the file past i_size without locking
196     * a page we already have locked.
197     *
198     * We must do any isize and inode updates
199     * before we unlock the pages. Otherwise we
200     * could end up racing with unlink.
201     */
202    BTRFS_I(inode)->disk_i_size = inode->i_size;
203    btrfs_update_inode(trans, root, inode);
204
205    return 0;
206fail:
207    btrfs_free_path(path);
208    return err;
209}
210
211
212/*
213 * conditionally insert an inline extent into the file. This
214 * does the checks required to make sure the data is small enough
215 * to fit as an inline extent.
216 */
217static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
218                 struct btrfs_root *root,
219                 struct inode *inode, u64 start, u64 end,
220                 size_t compressed_size,
221                 struct page **compressed_pages)
222{
223    u64 isize = i_size_read(inode);
224    u64 actual_end = min(end + 1, isize);
225    u64 inline_len = actual_end - start;
226    u64 aligned_end = (end + root->sectorsize - 1) &
227            ~((u64)root->sectorsize - 1);
228    u64 hint_byte;
229    u64 data_len = inline_len;
230    int ret;
231
232    if (compressed_size)
233        data_len = compressed_size;
234
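    /*
     * we refuse to inline if the range doesn't start at offset zero, if
     * it reaches into a second page, if the (compressed) data is bigger
     * than the max inline size for this leaf or the mount-time
     * max_inline limit, if uncompressed data ends exactly on a sector
     * boundary, or if the write stops short of i_size
     */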
235    if (start > 0 ||
236        actual_end >= PAGE_CACHE_SIZE ||
237        data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
238        (!compressed_size &&
239        (actual_end & (root->sectorsize - 1)) == 0) ||
240        end + 1 < isize ||
241        data_len > root->fs_info->max_inline) {
242        return 1;
243    }
244
245    ret = btrfs_drop_extents(trans, inode, start, aligned_end,
246                 &hint_byte, 1);
247    BUG_ON(ret);
248
249    if (isize > actual_end)
250        inline_len = min_t(u64, isize, actual_end);
251    ret = insert_inline_extent(trans, root, inode, start,
252                   inline_len, compressed_size,
253                   compressed_pages);
254    BUG_ON(ret);
255    btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
256    return 0;
257}
258
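/*
 * one compressed extent (or an uncompressed fallback range when
 * pages == NULL) produced by compress_file_range and queued for the
 * second phase, where disk space is allocated and the IO is submitted
 */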
259struct async_extent {
260    u64 start;
261    u64 ram_size;
262    u64 compressed_size;
263    struct page **pages;
264    unsigned long nr_pages;
265    struct list_head list;
266};
267
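/*
 * a unit of work for the delalloc worker threads: one locked delalloc
 * range of an inode plus the async_extents created while compressing it
 */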
268struct async_cow {
269    struct inode *inode;
270    struct btrfs_root *root;
271    struct page *locked_page;
272    u64 start;
273    u64 end;
274    struct list_head extents;
275    struct btrfs_work work;
276};
277
278static noinline int add_async_extent(struct async_cow *cow,
279                     u64 start, u64 ram_size,
280                     u64 compressed_size,
281                     struct page **pages,
282                     unsigned long nr_pages)
283{
284    struct async_extent *async_extent;
285
286    async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
287    async_extent->start = start;
288    async_extent->ram_size = ram_size;
289    async_extent->compressed_size = compressed_size;
290    async_extent->pages = pages;
291    async_extent->nr_pages = nr_pages;
292    list_add_tail(&async_extent->list, &cow->extents);
293    return 0;
294}
295
296/*
297 * we create compressed extents in two phases. The first
298 * phase compresses a range of pages that have already been
299 * locked (both pages and state bits are locked).
300 *
301 * This is done inside an ordered work queue, and the compression
302 * is spread across many cpus. The actual IO submission is step
303 * two, and the ordered work queue takes care of making sure that
304 * happens in the same order things were put onto the queue by
305 * writepages and friends.
306 *
307 * If this code finds it can't get good compression, it puts an
308 * entry onto the work queue to write the uncompressed bytes. This
309 * makes sure that both compressed inodes and uncompressed inodes
310 * are written in the same order that pdflush sent them down.
311 */
312static noinline int compress_file_range(struct inode *inode,
313                    struct page *locked_page,
314                    u64 start, u64 end,
315                    struct async_cow *async_cow,
316                    int *num_added)
317{
318    struct btrfs_root *root = BTRFS_I(inode)->root;
319    struct btrfs_trans_handle *trans;
320    u64 num_bytes;
321    u64 orig_start;
322    u64 disk_num_bytes;
323    u64 blocksize = root->sectorsize;
324    u64 actual_end;
325    u64 isize = i_size_read(inode);
326    int ret = 0;
327    struct page **pages = NULL;
328    unsigned long nr_pages;
329    unsigned long nr_pages_ret = 0;
330    unsigned long total_compressed = 0;
331    unsigned long total_in = 0;
332    unsigned long max_compressed = 128 * 1024;
333    unsigned long max_uncompressed = 128 * 1024;
334    int i;
335    int will_compress;
336
337    orig_start = start;
338
339    actual_end = min_t(u64, isize, end + 1);
340again:
341    will_compress = 0;
342    nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
343    nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
344
345    /*
346     * we don't want to send crud past the end of i_size through
347     * compression, that's just a waste of CPU time. So, if the
348     * end of the file is before the start of our current
349     * requested range of bytes, we bail out to the uncompressed
350     * cleanup code that can deal with all of this.
351     *
352     * It isn't really the fastest way to fix things, but this is a
353     * very uncommon corner.
354     */
355    if (actual_end <= start)
356        goto cleanup_and_bail_uncompressed;
357
358    total_compressed = actual_end - start;
359
360    /* we want to make sure that the amount of ram required to uncompress
361     * an extent is reasonable, so we limit the total size in ram
362     * of a compressed extent to 128k. This is a crucial number
363     * because it also controls how easily we can spread reads across
364     * cpus for decompression.
365     *
366     * We also want to make sure the amount of IO required to do
367     * a random read is reasonably small, so we limit the size of
368     * a compressed extent to 128k.
369     */
370    total_compressed = min(total_compressed, max_uncompressed);
371    num_bytes = (end - start + blocksize) & ~(blocksize - 1);
372    num_bytes = max(blocksize, num_bytes);
373    disk_num_bytes = num_bytes;
374    total_in = 0;
375    ret = 0;
376
377    /*
378     * we do compression for mount -o compress and when the
379     * inode has not been flagged as nocompress. This flag can
380     * change at any time if we discover bad compression ratios.
381     */
382    if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
383        (btrfs_test_opt(root, COMPRESS) ||
384         (BTRFS_I(inode)->force_compress))) {
385        WARN_ON(pages);
386        pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
387
388        ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
389                        total_compressed, pages,
390                        nr_pages, &nr_pages_ret,
391                        &total_in,
392                        &total_compressed,
393                        max_compressed);
394
395        if (!ret) {
396            unsigned long offset = total_compressed &
397                (PAGE_CACHE_SIZE - 1);
398            struct page *page = pages[nr_pages_ret - 1];
399            char *kaddr;
400
401            /* zero the tail end of the last page, we might be
402             * sending it down to disk
403             */
404            if (offset) {
405                kaddr = kmap_atomic(page, KM_USER0);
406                memset(kaddr + offset, 0,
407                       PAGE_CACHE_SIZE - offset);
408                kunmap_atomic(kaddr, KM_USER0);
409            }
410            will_compress = 1;
411        }
412    }
413    if (start == 0) {
414        trans = btrfs_join_transaction(root, 1);
415        BUG_ON(!trans);
416        btrfs_set_trans_block_group(trans, inode);
417
418        /* let's try to make an inline extent */
419        if (ret || total_in < (actual_end - start)) {
420            /* we didn't compress the entire range, try
421             * to make an uncompressed inline extent.
422             */
423            ret = cow_file_range_inline(trans, root, inode,
424                            start, end, 0, NULL);
425        } else {
426            /* try making a compressed inline extent */
427            ret = cow_file_range_inline(trans, root, inode,
428                            start, end,
429                            total_compressed, pages);
430        }
431        if (ret == 0) {
432            /*
433             * inline extent creation worked, we don't need
434             * to create any more async work items. Unlock
435             * and free up our temp pages.
436             */
437            extent_clear_unlock_delalloc(inode,
438                 &BTRFS_I(inode)->io_tree,
439                 start, end, NULL,
440                 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
441                 EXTENT_CLEAR_DELALLOC |
442                 EXTENT_CLEAR_ACCOUNTING |
443                 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
444
445            btrfs_end_transaction(trans, root);
446            goto free_pages_out;
447        }
448        btrfs_end_transaction(trans, root);
449    }
450
451    if (will_compress) {
452        /*
453         * we aren't doing an inline extent, so round the compressed
454         * size up to a block size boundary so that the allocator does
455         * sane things
456         */
457        total_compressed = (total_compressed + blocksize - 1) &
458            ~(blocksize - 1);
459
460        /*
461         * one last check to make sure the compression is really a
462         * win, compare the page count read with the blocks on disk
463         */
464        total_in = (total_in + PAGE_CACHE_SIZE - 1) &
465            ~(PAGE_CACHE_SIZE - 1);
466        if (total_compressed >= total_in) {
467            will_compress = 0;
468        } else {
469            disk_num_bytes = total_compressed;
470            num_bytes = total_in;
471        }
472    }
473    if (!will_compress && pages) {
474        /*
475         * the compression code ran but failed to make things smaller,
476         * free any pages it allocated and our page pointer array
477         */
478        for (i = 0; i < nr_pages_ret; i++) {
479            WARN_ON(pages[i]->mapping);
480            page_cache_release(pages[i]);
481        }
482        kfree(pages);
483        pages = NULL;
484        total_compressed = 0;
485        nr_pages_ret = 0;
486
487        /* flag the file so we don't compress in the future */
488        if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
489            !(BTRFS_I(inode)->force_compress)) {
490            BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
491        }
492    }
493    if (will_compress) {
494        *num_added += 1;
495
496        /* the async work queues will take care of doing actual
497         * allocation on disk for these compressed pages,
498         * and will submit them to the elevator.
499         */
500        add_async_extent(async_cow, start, num_bytes,
501                 total_compressed, pages, nr_pages_ret);
502
503        if (start + num_bytes < end && start + num_bytes < actual_end) {
504            start += num_bytes;
505            pages = NULL;
506            cond_resched();
507            goto again;
508        }
509    } else {
510cleanup_and_bail_uncompressed:
511        /*
512         * No compression, but we still need to write the pages in
513         * the file we've been given so far. redirty the locked
514         * page if it corresponds to our extent and set things up
515         * for the async work queue to run cow_file_range to do
516         * the normal delalloc dance
517         */
518        if (page_offset(locked_page) >= start &&
519            page_offset(locked_page) <= end) {
520            __set_page_dirty_nobuffers(locked_page);
521            /* unlocked later on in the async handlers */
522        }
523        add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
524        *num_added += 1;
525    }
526
527out:
528    return 0;
529
530free_pages_out:
531    for (i = 0; i < nr_pages_ret; i++) {
532        WARN_ON(pages[i]->mapping);
533        page_cache_release(pages[i]);
534    }
535    kfree(pages);
536
537    goto out;
538}
539
540/*
541 * phase two of compressed writeback. This is the ordered portion
542 * of the code, which only gets called in the order the work was
543 * queued. We walk all the async extents created by compress_file_range
544 * and send them down to the disk.
545 */
546static noinline int submit_compressed_extents(struct inode *inode,
547                          struct async_cow *async_cow)
548{
549    struct async_extent *async_extent;
550    u64 alloc_hint = 0;
551    struct btrfs_trans_handle *trans;
552    struct btrfs_key ins;
553    struct extent_map *em;
554    struct btrfs_root *root = BTRFS_I(inode)->root;
555    struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
556    struct extent_io_tree *io_tree;
557    int ret = 0;
558
559    if (list_empty(&async_cow->extents))
560        return 0;
561
562
563    while (!list_empty(&async_cow->extents)) {
564        async_extent = list_entry(async_cow->extents.next,
565                      struct async_extent, list);
566        list_del(&async_extent->list);
567
568        io_tree = &BTRFS_I(inode)->io_tree;
569
570retry:
571        /* did the compression code fall back to uncompressed IO? */
572        if (!async_extent->pages) {
573            int page_started = 0;
574            unsigned long nr_written = 0;
575
576            lock_extent(io_tree, async_extent->start,
577                     async_extent->start +
578                     async_extent->ram_size - 1, GFP_NOFS);
579
580            /* allocate blocks */
581            ret = cow_file_range(inode, async_cow->locked_page,
582                         async_extent->start,
583                         async_extent->start +
584                         async_extent->ram_size - 1,
585                         &page_started, &nr_written, 0);
586
587            /*
588             * if page_started, cow_file_range inserted an
589             * inline extent and took care of all the unlocking
590             * and IO for us. Otherwise, we need to submit
591             * all those pages down to the drive.
592             */
593            if (!page_started && !ret)
594                extent_write_locked_range(io_tree,
595                          inode, async_extent->start,
596                          async_extent->start +
597                          async_extent->ram_size - 1,
598                          btrfs_get_extent,
599                          WB_SYNC_ALL);
600            kfree(async_extent);
601            cond_resched();
602            continue;
603        }
604
605        lock_extent(io_tree, async_extent->start,
606                async_extent->start + async_extent->ram_size - 1,
607                GFP_NOFS);
608
609        trans = btrfs_join_transaction(root, 1);
610        ret = btrfs_reserve_extent(trans, root,
611                       async_extent->compressed_size,
612                       async_extent->compressed_size,
613                       0, alloc_hint,
614                       (u64)-1, &ins, 1);
615        btrfs_end_transaction(trans, root);
616
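        /*
         * if we couldn't reserve a contiguous extent for the compressed
         * data, free the compressed pages and retry this range as
         * uncompressed IO via the !pages branch above
         */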
617        if (ret) {
618            int i;
619            for (i = 0; i < async_extent->nr_pages; i++) {
620                WARN_ON(async_extent->pages[i]->mapping);
621                page_cache_release(async_extent->pages[i]);
622            }
623            kfree(async_extent->pages);
624            async_extent->nr_pages = 0;
625            async_extent->pages = NULL;
626            unlock_extent(io_tree, async_extent->start,
627                      async_extent->start +
628                      async_extent->ram_size - 1, GFP_NOFS);
629            goto retry;
630        }
631
632        /*
633         * here we're doing allocation and writeback of the
634         * compressed pages
635         */
636        btrfs_drop_extent_cache(inode, async_extent->start,
637                    async_extent->start +
638                    async_extent->ram_size - 1, 0);
639
640        em = alloc_extent_map(GFP_NOFS);
641        em->start = async_extent->start;
642        em->len = async_extent->ram_size;
643        em->orig_start = em->start;
644
645        em->block_start = ins.objectid;
646        em->block_len = ins.offset;
647        em->bdev = root->fs_info->fs_devices->latest_bdev;
648        set_bit(EXTENT_FLAG_PINNED, &em->flags);
649        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
650
651        while (1) {
652            write_lock(&em_tree->lock);
653            ret = add_extent_mapping(em_tree, em);
654            write_unlock(&em_tree->lock);
655            if (ret != -EEXIST) {
656                free_extent_map(em);
657                break;
658            }
659            btrfs_drop_extent_cache(inode, async_extent->start,
660                        async_extent->start +
661                        async_extent->ram_size - 1, 0);
662        }
663
664        ret = btrfs_add_ordered_extent(inode, async_extent->start,
665                           ins.objectid,
666                           async_extent->ram_size,
667                           ins.offset,
668                           BTRFS_ORDERED_COMPRESSED);
669        BUG_ON(ret);
670
671        /*
672         * clear dirty, set writeback and unlock the pages.
673         */
674        extent_clear_unlock_delalloc(inode,
675                &BTRFS_I(inode)->io_tree,
676                async_extent->start,
677                async_extent->start +
678                async_extent->ram_size - 1,
679                NULL, EXTENT_CLEAR_UNLOCK_PAGE |
680                EXTENT_CLEAR_UNLOCK |
681                EXTENT_CLEAR_DELALLOC |
682                EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
683
684        ret = btrfs_submit_compressed_write(inode,
685                    async_extent->start,
686                    async_extent->ram_size,
687                    ins.objectid,
688                    ins.offset, async_extent->pages,
689                    async_extent->nr_pages);
690
691        BUG_ON(ret);
692        alloc_hint = ins.objectid + ins.offset;
693        kfree(async_extent);
694        cond_resched();
695    }
696
697    return 0;
698}
699
700/*
701 * when extent_io.c finds a delayed allocation range in the file,
702 * the callbacks end up in this code. The basic idea is to
703 * allocate extents on disk for the range, and create ordered data structs
704 * in ram to track those extents.
705 *
706 * locked_page is the page that writepage had locked already. We use
707 * it to make sure we don't do extra locks or unlocks.
708 *
709 * *page_started is set to one if we unlock locked_page and do everything
710 * required to start IO on it. It may be clean and already done with
711 * IO when we return.
712 */
713static noinline int cow_file_range(struct inode *inode,
714                   struct page *locked_page,
715                   u64 start, u64 end, int *page_started,
716                   unsigned long *nr_written,
717                   int unlock)
718{
719    struct btrfs_root *root = BTRFS_I(inode)->root;
720    struct btrfs_trans_handle *trans;
721    u64 alloc_hint = 0;
722    u64 num_bytes;
723    unsigned long ram_size;
724    u64 disk_num_bytes;
725    u64 cur_alloc_size;
726    u64 blocksize = root->sectorsize;
727    u64 actual_end;
728    u64 isize = i_size_read(inode);
729    struct btrfs_key ins;
730    struct extent_map *em;
731    struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
732    int ret = 0;
733
734    trans = btrfs_join_transaction(root, 1);
735    BUG_ON(!trans);
736    btrfs_set_trans_block_group(trans, inode);
737
738    actual_end = min_t(u64, isize, end + 1);
739
740    num_bytes = (end - start + blocksize) & ~(blocksize - 1);
741    num_bytes = max(blocksize, num_bytes);
742    disk_num_bytes = num_bytes;
743    ret = 0;
744
745    if (start == 0) {
746        /* let's try to make an inline extent */
747        ret = cow_file_range_inline(trans, root, inode,
748                        start, end, 0, NULL);
749        if (ret == 0) {
750            extent_clear_unlock_delalloc(inode,
751                     &BTRFS_I(inode)->io_tree,
752                     start, end, NULL,
753                     EXTENT_CLEAR_UNLOCK_PAGE |
754                     EXTENT_CLEAR_UNLOCK |
755                     EXTENT_CLEAR_DELALLOC |
756                     EXTENT_CLEAR_ACCOUNTING |
757                     EXTENT_CLEAR_DIRTY |
758                     EXTENT_SET_WRITEBACK |
759                     EXTENT_END_WRITEBACK);
760
761            *nr_written = *nr_written +
762                 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
763            *page_started = 1;
764            ret = 0;
765            goto out;
766        }
767    }
768
769    BUG_ON(disk_num_bytes >
770           btrfs_super_total_bytes(&root->fs_info->super_copy));
771
772
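    /*
     * look for an existing mapping near the start of this range so we
     * can seed alloc_hint and keep new blocks close to the old ones
     */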
773    read_lock(&BTRFS_I(inode)->extent_tree.lock);
774    em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
775                   start, num_bytes);
776    if (em) {
777        /*
778         * if block start isn't an actual block number then find the
779         * first block in this inode and use that as a hint. If that
780         * block is also bogus then just don't worry about it.
781         */
782        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
783            free_extent_map(em);
784            em = search_extent_mapping(em_tree, 0, 0);
785            if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
786                alloc_hint = em->block_start;
787            if (em)
788                free_extent_map(em);
789        } else {
790            alloc_hint = em->block_start;
791            free_extent_map(em);
792        }
793    }
794    read_unlock(&BTRFS_I(inode)->extent_tree.lock);
795    btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
796
797    while (disk_num_bytes > 0) {
798        unsigned long op;
799
800        cur_alloc_size = disk_num_bytes;
801        ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
802                       root->sectorsize, 0, alloc_hint,
803                       (u64)-1, &ins, 1);
804        BUG_ON(ret);
805
806        em = alloc_extent_map(GFP_NOFS);
807        em->start = start;
808        em->orig_start = em->start;
809        ram_size = ins.offset;
810        em->len = ins.offset;
811
812        em->block_start = ins.objectid;
813        em->block_len = ins.offset;
814        em->bdev = root->fs_info->fs_devices->latest_bdev;
815        set_bit(EXTENT_FLAG_PINNED, &em->flags);
816
817        while (1) {
818            write_lock(&em_tree->lock);
819            ret = add_extent_mapping(em_tree, em);
820            write_unlock(&em_tree->lock);
821            if (ret != -EEXIST) {
822                free_extent_map(em);
823                break;
824            }
825            btrfs_drop_extent_cache(inode, start,
826                        start + ram_size - 1, 0);
827        }
828
829        cur_alloc_size = ins.offset;
830        ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
831                           ram_size, cur_alloc_size, 0);
832        BUG_ON(ret);
833
834        if (root->root_key.objectid ==
835            BTRFS_DATA_RELOC_TREE_OBJECTID) {
836            ret = btrfs_reloc_clone_csums(inode, start,
837                              cur_alloc_size);
838            BUG_ON(ret);
839        }
840
841        if (disk_num_bytes < cur_alloc_size)
842            break;
843
844        /* we're not doing compressed IO, don't unlock the first
845         * page (which the caller expects to stay locked), don't
846         * clear any dirty bits and don't set any writeback bits
847         *
848         * Do set the Private2 bit so we know this page was properly
849         * setup for writepage
850         */
851        op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
852        op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
853            EXTENT_SET_PRIVATE2;
854
855        extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
856                         start, start + ram_size - 1,
857                         locked_page, op);
858        disk_num_bytes -= cur_alloc_size;
859        num_bytes -= cur_alloc_size;
860        alloc_hint = ins.objectid + ins.offset;
861        start += cur_alloc_size;
862    }
863out:
864    ret = 0;
865    btrfs_end_transaction(trans, root);
866
867    return ret;
868}
869
870/*
871 * work queue callback to start compression on a file and its pages
872 */
873static noinline void async_cow_start(struct btrfs_work *work)
874{
875    struct async_cow *async_cow;
876    int num_added = 0;
877    async_cow = container_of(work, struct async_cow, work);
878
879    compress_file_range(async_cow->inode, async_cow->locked_page,
880                async_cow->start, async_cow->end, async_cow,
881                &num_added);
882    if (num_added == 0)
883        async_cow->inode = NULL;
884}
885
886/*
887 * work queue callback to submit previously compressed pages
888 */
889static noinline void async_cow_submit(struct btrfs_work *work)
890{
891    struct async_cow *async_cow;
892    struct btrfs_root *root;
893    unsigned long nr_pages;
894
895    async_cow = container_of(work, struct async_cow, work);
896
897    root = async_cow->root;
898    nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
899        PAGE_CACHE_SHIFT;
900
901    atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
902
903    if (atomic_read(&root->fs_info->async_delalloc_pages) <
904        5 * 1024 * 1024 &&
905        waitqueue_active(&root->fs_info->async_submit_wait))
906        wake_up(&root->fs_info->async_submit_wait);
907
908    if (async_cow->inode)
909        submit_compressed_extents(async_cow->inode, async_cow);
910}
911
912static noinline void async_cow_free(struct btrfs_work *work)
913{
914    struct async_cow *async_cow;
915    async_cow = container_of(work, struct async_cow, work);
916    kfree(async_cow);
917}
918
919static int cow_file_range_async(struct inode *inode, struct page *locked_page,
920                u64 start, u64 end, int *page_started,
921                unsigned long *nr_written)
922{
923    struct async_cow *async_cow;
924    struct btrfs_root *root = BTRFS_I(inode)->root;
925    unsigned long nr_pages;
926    u64 cur_end;
927    int limit = 10 * 1024 * 1024;
928
929    clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
930             1, 0, NULL, GFP_NOFS);
931    while (start < end) {
932        async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
933        async_cow->inode = inode;
934        async_cow->root = root;
935        async_cow->locked_page = locked_page;
936        async_cow->start = start;
937
938        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
939            cur_end = end;
940        else
941            cur_end = min(end, start + 512 * 1024 - 1);
942
943        async_cow->end = cur_end;
944        INIT_LIST_HEAD(&async_cow->extents);
945
946        async_cow->work.func = async_cow_start;
947        async_cow->work.ordered_func = async_cow_submit;
948        async_cow->work.ordered_free = async_cow_free;
949        async_cow->work.flags = 0;
950
951        nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
952            PAGE_CACHE_SHIFT;
953        atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
954
955        btrfs_queue_worker(&root->fs_info->delalloc_workers,
956                   &async_cow->work);
957
958        if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
959            wait_event(root->fs_info->async_submit_wait,
960               (atomic_read(&root->fs_info->async_delalloc_pages) <
961                limit));
962        }
963
964        while (atomic_read(&root->fs_info->async_submit_draining) &&
965              atomic_read(&root->fs_info->async_delalloc_pages)) {
966            wait_event(root->fs_info->async_submit_wait,
967              (atomic_read(&root->fs_info->async_delalloc_pages) ==
968               0));
969        }
970
971        *nr_written += nr_pages;
972        start = cur_end + 1;
973    }
974    *page_started = 1;
975    return 0;
976}
977
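/*
 * returns 0 only if we are sure no checksum items exist for the byte
 * range; anything else forces the caller to cow
 */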
978static noinline int csum_exist_in_range(struct btrfs_root *root,
979                    u64 bytenr, u64 num_bytes)
980{
981    int ret;
982    struct btrfs_ordered_sum *sums;
983    LIST_HEAD(list);
984
985    ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
986                       bytenr + num_bytes - 1, &list);
987    if (ret == 0 && list_empty(&list))
988        return 0;
989
990    while (!list_empty(&list)) {
991        sums = list_entry(list.next, struct btrfs_ordered_sum, list);
992        list_del(&sums->list);
993        kfree(sums);
994    }
995    return 1;
996}
997
998/*
999 * the callback for nocow writeback. This checks for snapshots or COW copies
1000 * of the extents that exist in the file, and COWs the file as required.
1001 *
1002 * If no cow copies or snapshots exist, we write directly to the existing
1003 * blocks on disk
1004 */
1005static noinline int run_delalloc_nocow(struct inode *inode,
1006                       struct page *locked_page,
1007                  u64 start, u64 end, int *page_started, int force,
1008                  unsigned long *nr_written)
1009{
1010    struct btrfs_root *root = BTRFS_I(inode)->root;
1011    struct btrfs_trans_handle *trans;
1012    struct extent_buffer *leaf;
1013    struct btrfs_path *path;
1014    struct btrfs_file_extent_item *fi;
1015    struct btrfs_key found_key;
1016    u64 cow_start;
1017    u64 cur_offset;
1018    u64 extent_end;
1019    u64 extent_offset;
1020    u64 disk_bytenr;
1021    u64 num_bytes;
1022    int extent_type;
1023    int ret;
1024    int type;
1025    int nocow;
1026    int check_prev = 1;
1027
1028    path = btrfs_alloc_path();
1029    BUG_ON(!path);
1030    trans = btrfs_join_transaction(root, 1);
1031    BUG_ON(!trans);
1032
1033    cow_start = (u64)-1;
1034    cur_offset = start;
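    /*
     * walk the file extent items covering the range. Extents we can
     * safely write into in place become nocow/prealloc ordered extents;
     * everything else is accumulated into [cow_start, cur_offset) and
     * handed to cow_file_range
     */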
1035    while (1) {
1036        ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1037                           cur_offset, 0);
1038        BUG_ON(ret < 0);
1039        if (ret > 0 && path->slots[0] > 0 && check_prev) {
1040            leaf = path->nodes[0];
1041            btrfs_item_key_to_cpu(leaf, &found_key,
1042                          path->slots[0] - 1);
1043            if (found_key.objectid == inode->i_ino &&
1044                found_key.type == BTRFS_EXTENT_DATA_KEY)
1045                path->slots[0]--;
1046        }
1047        check_prev = 0;
1048next_slot:
1049        leaf = path->nodes[0];
1050        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1051            ret = btrfs_next_leaf(root, path);
1052            if (ret < 0)
1053                BUG_ON(1);
1054            if (ret > 0)
1055                break;
1056            leaf = path->nodes[0];
1057        }
1058
1059        nocow = 0;
1060        disk_bytenr = 0;
1061        num_bytes = 0;
1062        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1063
1064        if (found_key.objectid > inode->i_ino ||
1065            found_key.type > BTRFS_EXTENT_DATA_KEY ||
1066            found_key.offset > end)
1067            break;
1068
1069        if (found_key.offset > cur_offset) {
1070            extent_end = found_key.offset;
1071            extent_type = 0;
1072            goto out_check;
1073        }
1074
1075        fi = btrfs_item_ptr(leaf, path->slots[0],
1076                    struct btrfs_file_extent_item);
1077        extent_type = btrfs_file_extent_type(leaf, fi);
1078
1079        if (extent_type == BTRFS_FILE_EXTENT_REG ||
1080            extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1081            disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1082            extent_offset = btrfs_file_extent_offset(leaf, fi);
1083            extent_end = found_key.offset +
1084                btrfs_file_extent_num_bytes(leaf, fi);
1085            if (extent_end <= start) {
1086                path->slots[0]++;
1087                goto next_slot;
1088            }
1089            if (disk_bytenr == 0)
1090                goto out_check;
1091            if (btrfs_file_extent_compression(leaf, fi) ||
1092                btrfs_file_extent_encryption(leaf, fi) ||
1093                btrfs_file_extent_other_encoding(leaf, fi))
1094                goto out_check;
1095            if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1096                goto out_check;
1097            if (btrfs_extent_readonly(root, disk_bytenr))
1098                goto out_check;
1099            if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1100                          found_key.offset -
1101                          extent_offset, disk_bytenr))
1102                goto out_check;
1103            disk_bytenr += extent_offset;
1104            disk_bytenr += cur_offset - found_key.offset;
1105            num_bytes = min(end + 1, extent_end) - cur_offset;
1106            /*
1107             * force cow if csum exists in the range.
1108             * this ensures that the csums for a given extent are
1109             * either valid or do not exist.
1110             */
1111            if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1112                goto out_check;
1113            nocow = 1;
1114        } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1115            extent_end = found_key.offset +
1116                btrfs_file_extent_inline_len(leaf, fi);
1117            extent_end = ALIGN(extent_end, root->sectorsize);
1118        } else {
1119            BUG_ON(1);
1120        }
1121out_check:
1122        if (extent_end <= start) {
1123            path->slots[0]++;
1124            goto next_slot;
1125        }
1126        if (!nocow) {
1127            if (cow_start == (u64)-1)
1128                cow_start = cur_offset;
1129            cur_offset = extent_end;
1130            if (cur_offset > end)
1131                break;
1132            path->slots[0]++;
1133            goto next_slot;
1134        }
1135
1136        btrfs_release_path(root, path);
1137        if (cow_start != (u64)-1) {
1138            ret = cow_file_range(inode, locked_page, cow_start,
1139                    found_key.offset - 1, page_started,
1140                    nr_written, 1);
1141            BUG_ON(ret);
1142            cow_start = (u64)-1;
1143        }
1144
1145        if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1146            struct extent_map *em;
1147            struct extent_map_tree *em_tree;
1148            em_tree = &BTRFS_I(inode)->extent_tree;
1149            em = alloc_extent_map(GFP_NOFS);
1150            em->start = cur_offset;
1151            em->orig_start = em->start;
1152            em->len = num_bytes;
1153            em->block_len = num_bytes;
1154            em->block_start = disk_bytenr;
1155            em->bdev = root->fs_info->fs_devices->latest_bdev;
1156            set_bit(EXTENT_FLAG_PINNED, &em->flags);
1157            while (1) {
1158                write_lock(&em_tree->lock);
1159                ret = add_extent_mapping(em_tree, em);
1160                write_unlock(&em_tree->lock);
1161                if (ret != -EEXIST) {
1162                    free_extent_map(em);
1163                    break;
1164                }
1165                btrfs_drop_extent_cache(inode, em->start,
1166                        em->start + em->len - 1, 0);
1167            }
1168            type = BTRFS_ORDERED_PREALLOC;
1169        } else {
1170            type = BTRFS_ORDERED_NOCOW;
1171        }
1172
1173        ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1174                           num_bytes, num_bytes, type);
1175        BUG_ON(ret);
1176
1177        extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1178                cur_offset, cur_offset + num_bytes - 1,
1179                locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1180                EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1181                EXTENT_SET_PRIVATE2);
1182        cur_offset = extent_end;
1183        if (cur_offset > end)
1184            break;
1185    }
1186    btrfs_release_path(root, path);
1187
1188    if (cur_offset <= end && cow_start == (u64)-1)
1189        cow_start = cur_offset;
1190    if (cow_start != (u64)-1) {
1191        ret = cow_file_range(inode, locked_page, cow_start, end,
1192                     page_started, nr_written, 1);
1193        BUG_ON(ret);
1194    }
1195
1196    ret = btrfs_end_transaction(trans, root);
1197    BUG_ON(ret);
1198    btrfs_free_path(path);
1199    return 0;
1200}
1201
1202/*
1203 * extent_io.c callback to do delayed allocation processing
1204 */
1205static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1206                  u64 start, u64 end, int *page_started,
1207                  unsigned long *nr_written)
1208{
1209    int ret;
1210    struct btrfs_root *root = BTRFS_I(inode)->root;
1211
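    /*
     * nodatacow and prealloc inodes take the nocow path; otherwise we
     * either cow the range directly or, when compression is enabled,
     * hand it to the async cow workers
     */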
1212    if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1213        ret = run_delalloc_nocow(inode, locked_page, start, end,
1214                     page_started, 1, nr_written);
1215    else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1216        ret = run_delalloc_nocow(inode, locked_page, start, end,
1217                     page_started, 0, nr_written);
1218    else if (!btrfs_test_opt(root, COMPRESS) &&
1219         !(BTRFS_I(inode)->force_compress))
1220        ret = cow_file_range(inode, locked_page, start, end,
1221                      page_started, nr_written, 1);
1222    else
1223        ret = cow_file_range_async(inode, locked_page, start, end,
1224                       page_started, nr_written);
1225    return ret;
1226}
1227
1228static int btrfs_split_extent_hook(struct inode *inode,
1229                    struct extent_state *orig, u64 split)
1230{
1231    if (!(orig->state & EXTENT_DELALLOC))
1232        return 0;
1233
1234    spin_lock(&BTRFS_I(inode)->accounting_lock);
1235    BTRFS_I(inode)->outstanding_extents++;
1236    spin_unlock(&BTRFS_I(inode)->accounting_lock);
1237
1238    return 0;
1239}
1240
1241/*
1242 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1243 * extents so we can keep track of new extents that are just merged onto old
1244 * extents, such as when we are doing sequential writes, so we can properly
1245 * account for the metadata space we'll need.
1246 */
1247static int btrfs_merge_extent_hook(struct inode *inode,
1248                   struct extent_state *new,
1249                   struct extent_state *other)
1250{
1251    /* not delalloc, ignore it */
1252    if (!(other->state & EXTENT_DELALLOC))
1253        return 0;
1254
1255    spin_lock(&BTRFS_I(inode)->accounting_lock);
1256    BTRFS_I(inode)->outstanding_extents--;
1257    spin_unlock(&BTRFS_I(inode)->accounting_lock);
1258
1259    return 0;
1260}
1261
1262/*
1263 * extent_io.c set_bit_hook, used to track delayed allocation
1264 * bytes in this file, and to maintain the list of inodes that
1265 * have pending delalloc work to be done.
1266 */
1267static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1268               unsigned long old, unsigned long bits)
1269{
1270
1271    /*
1272     * set_bit and clear bit hooks normally require _irqsave/restore
1273     * but in this case, we are only testing for the DELALLOC
1274     * bit, which is only set or cleared with irqs on
1275     */
1276    if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1277        struct btrfs_root *root = BTRFS_I(inode)->root;
1278
1279        spin_lock(&BTRFS_I(inode)->accounting_lock);
1280        BTRFS_I(inode)->outstanding_extents++;
1281        spin_unlock(&BTRFS_I(inode)->accounting_lock);
1282        btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1283
1284        spin_lock(&root->fs_info->delalloc_lock);
1285        BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1286        root->fs_info->delalloc_bytes += end - start + 1;
1287        if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1288            list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1289                      &root->fs_info->delalloc_inodes);
1290        }
1291        spin_unlock(&root->fs_info->delalloc_lock);
1292    }
1293    return 0;
1294}
1295
1296/*
1297 * extent_io.c clear_bit_hook, see set_bit_hook for why
1298 */
1299static int btrfs_clear_bit_hook(struct inode *inode,
1300                struct extent_state *state, unsigned long bits)
1301{
1302    /*
1303     * set_bit and clear bit hooks normally require _irqsave/restore
1304     * but in this case, we are only testing for the DELALLOC
1305     * bit, which is only set or cleared with irqs on
1306     */
1307    if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1308        struct btrfs_root *root = BTRFS_I(inode)->root;
1309
1310        if (bits & EXTENT_DO_ACCOUNTING) {
1311            spin_lock(&BTRFS_I(inode)->accounting_lock);
1312            WARN_ON(!BTRFS_I(inode)->outstanding_extents);
1313            BTRFS_I(inode)->outstanding_extents--;
1314            spin_unlock(&BTRFS_I(inode)->accounting_lock);
1315            btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1316        }
1317
1318        spin_lock(&root->fs_info->delalloc_lock);
1319        if (state->end - state->start + 1 >
1320            root->fs_info->delalloc_bytes) {
1321            printk(KERN_INFO "btrfs warning: delalloc account "
1322                   "%llu %llu\n",
1323                   (unsigned long long)
1324                   state->end - state->start + 1,
1325                   (unsigned long long)
1326                   root->fs_info->delalloc_bytes);
1327            btrfs_delalloc_free_space(root, inode, (u64)-1);
1328            root->fs_info->delalloc_bytes = 0;
1329            BTRFS_I(inode)->delalloc_bytes = 0;
1330        } else {
1331            btrfs_delalloc_free_space(root, inode,
1332                          state->end -
1333                          state->start + 1);
1334            root->fs_info->delalloc_bytes -= state->end -
1335                state->start + 1;
1336            BTRFS_I(inode)->delalloc_bytes -= state->end -
1337                state->start + 1;
1338        }
1339        if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1340            !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1341            list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1342        }
1343        spin_unlock(&root->fs_info->delalloc_lock);
1344    }
1345    return 0;
1346}
1347
1348/*
1349 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1350 * we don't create bios that span stripes or chunks
1351 */
1352int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1353             size_t size, struct bio *bio,
1354             unsigned long bio_flags)
1355{
1356    struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1357    struct btrfs_mapping_tree *map_tree;
1358    u64 logical = (u64)bio->bi_sector << 9;
1359    u64 length = 0;
1360    u64 map_length;
1361    int ret;
1362
1363    if (bio_flags & EXTENT_BIO_COMPRESSED)
1364        return 0;
1365
1366    length = bio->bi_size;
1367    map_tree = &root->fs_info->mapping_tree;
1368    map_length = length;
1369    ret = btrfs_map_block(map_tree, READ, logical,
1370                  &map_length, NULL, 0);
1371
1372    if (map_length < length + size)
1373        return 1;
1374    return 0;
1375}
1376
1377/*
1378 * in order to insert checksums into the metadata in large chunks,
1379 * we wait until bio submission time. All the pages in the bio are
1380 * checksummed and sums are attached onto the ordered extent record.
1381 *
1382 * At IO completion time the csums attached to the ordered extent record
1383 * are inserted into the btree
1384 */
1385static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1386                    struct bio *bio, int mirror_num,
1387                    unsigned long bio_flags)
1388{
1389    struct btrfs_root *root = BTRFS_I(inode)->root;
1390    int ret = 0;
1391
1392    ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1393    BUG_ON(ret);
1394    return 0;
1395}
1396
1397/*
1398 * in order to insert checksums into the metadata in large chunks,
1399 * we wait until bio submission time. All the pages in the bio are
1400 * checksummed and sums are attached onto the ordered extent record.
1401 *
1402 * At IO completion time the csums attached to the ordered extent record
1403 * are inserted into the btree
1404 */
1405static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1406              int mirror_num, unsigned long bio_flags)
1407{
1408    struct btrfs_root *root = BTRFS_I(inode)->root;
1409    return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1410}
1411
1412/*
1413 * extent_io.c submission hook. This does the right thing for csum calculation
1414 * on write, or reading the csums from the tree before a read
1415 */
1416static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1417              int mirror_num, unsigned long bio_flags)
1418{
1419    struct btrfs_root *root = BTRFS_I(inode)->root;
1420    int ret = 0;
1421    int skip_sum;
1422
1423    skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1424
1425    ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1426    BUG_ON(ret);
1427
1428    if (!(rw & (1 << BIO_RW))) {
1429        if (bio_flags & EXTENT_BIO_COMPRESSED) {
1430            return btrfs_submit_compressed_read(inode, bio,
1431                            mirror_num, bio_flags);
1432        } else if (!skip_sum)
1433            btrfs_lookup_bio_sums(root, inode, bio, NULL);
1434        goto mapit;
1435    } else if (!skip_sum) {
1436        /* csum items have already been cloned */
1437        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1438            goto mapit;
1439        /* we're doing a write, do the async checksumming */
1440        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1441                   inode, rw, bio, mirror_num,
1442                   bio_flags, __btrfs_submit_bio_start,
1443                   __btrfs_submit_bio_done);
1444    }
1445
1446mapit:
1447    return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1448}
1449
1450/*
1451 * given a list of ordered sums, record them in the inode. This happens
1452 * at IO completion time based on sums calculated at bio submission time.
1453 */
1454static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1455                 struct inode *inode, u64 file_offset,
1456                 struct list_head *list)
1457{
1458    struct btrfs_ordered_sum *sum;
1459
1460    btrfs_set_trans_block_group(trans, inode);
1461
1462    list_for_each_entry(sum, list, list) {
1463        btrfs_csum_file_blocks(trans,
1464               BTRFS_I(inode)->root->fs_info->csum_root, sum);
1465    }
1466    return 0;
1467}
1468
1469int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1470                  struct extent_state **cached_state)
1471{
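    /* end is inclusive, so it should never sit exactly on a page boundary */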
1472    if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1473        WARN_ON(1);
1474    return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1475                   cached_state, GFP_NOFS);
1476}
1477
1478/* see btrfs_writepage_start_hook for details on why this is required */
1479struct btrfs_writepage_fixup {
1480    struct page *page;
1481    struct btrfs_work work;
1482};
1483
1484static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1485{
1486    struct btrfs_writepage_fixup *fixup;
1487    struct btrfs_ordered_extent *ordered;
1488    struct extent_state *cached_state = NULL;
1489    struct page *page;
1490    struct inode *inode;
1491    u64 page_start;
1492    u64 page_end;
1493
1494    fixup = container_of(work, struct btrfs_writepage_fixup, work);
1495    page = fixup->page;
1496again:
1497    lock_page(page);
1498    if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1499        ClearPageChecked(page);
1500        goto out_page;
1501    }
1502
1503    inode = page->mapping->host;
1504    page_start = page_offset(page);
1505    page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1506
1507    lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1508             &cached_state, GFP_NOFS);
1509
1510    /* already ordered? We're done */
1511    if (PagePrivate2(page))
1512        goto out;
1513
1514    ordered = btrfs_lookup_ordered_extent(inode, page_start);
1515    if (ordered) {
1516        unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1517                     page_end, &cached_state, GFP_NOFS);
1518        unlock_page(page);
1519        btrfs_start_ordered_extent(inode, ordered, 1);
1520        goto again;
1521    }
1522
1523    btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1524    ClearPageChecked(page);
1525out:
1526    unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1527                 &cached_state, GFP_NOFS);
1528out_page:
1529    unlock_page(page);
1530    page_cache_release(page);
1531}
1532
1533/*
1534 * There are a few paths in the higher layers of the kernel that directly
1535 * set the page dirty bit without asking the filesystem if it is a
1536 * good idea. This causes problems because we want to make sure COW
1537 * properly happens and the data=ordered rules are followed.
1538 *
1539 * In our case any range that doesn't have the ORDERED bit set
1540 * hasn't been properly setup for IO. We kick off an async process
1541 * to fix it up. The async helper will wait for ordered extents, set
1542 * the delalloc bit and make it safe to write the page.
1543 */
1544static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1545{
1546    struct inode *inode = page->mapping->host;
1547    struct btrfs_writepage_fixup *fixup;
1548    struct btrfs_root *root = BTRFS_I(inode)->root;
1549
1550    /* this page is properly in the ordered list */
1551    if (TestClearPagePrivate2(page))
1552        return 0;
1553
1554    if (PageChecked(page))
1555        return -EAGAIN;
1556
1557    fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1558    if (!fixup)
1559        return -EAGAIN;
1560
1561    SetPageChecked(page);
1562    page_cache_get(page);
1563    fixup->work.func = btrfs_writepage_fixup_worker;
1564    fixup->page = page;
1565    btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1566    return -EAGAIN;
1567}
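
/*
 * Illustrative sketch (not part of this file; the exact caller lives in
 * extent_io.c): the writepage path is expected to treat the -EAGAIN
 * returned by btrfs_writepage_start_hook roughly like this, redirtying
 * the page and backing off so the fixup worker gets a chance to run:
 *
 *	ret = tree->ops->writepage_start_hook(page, start, page_end);
 *	if (ret == -EAGAIN) {
 *		redirty_page_for_writepage(wbc, page);
 *		unlock_page(page);
 *		return 0;
 *	}
 */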
1568
1569static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1570                       struct inode *inode, u64 file_pos,
1571                       u64 disk_bytenr, u64 disk_num_bytes,
1572                       u64 num_bytes, u64 ram_bytes,
1573                       u8 compression, u8 encryption,
1574                       u16 other_encoding, int extent_type)
1575{
1576    struct btrfs_root *root = BTRFS_I(inode)->root;
1577    struct btrfs_file_extent_item *fi;
1578    struct btrfs_path *path;
1579    struct extent_buffer *leaf;
1580    struct btrfs_key ins;
1581    u64 hint;
1582    int ret;
1583
1584    path = btrfs_alloc_path();
1585    BUG_ON(!path);
1586
1587    path->leave_spinning = 1;
1588
1589    /*
1590     * we may be replacing one extent in the tree with another.
1591     * The new extent is pinned in the extent map, and we don't want
1592     * to drop it from the cache until it is completely in the btree.
1593     *
1594     * So, tell btrfs_drop_extents to leave this extent in the cache.
1595     * The caller is expected to unpin it and allow it to be merged
1596     * with the others.
1597     */
1598    ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1599                 &hint, 0);
1600    BUG_ON(ret);
1601
1602    ins.objectid = inode->i_ino;
1603    ins.offset = file_pos;
1604    ins.type = BTRFS_EXTENT_DATA_KEY;
1605    ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1606    BUG_ON(ret);
1607    leaf = path->nodes[0];
1608    fi = btrfs_item_ptr(leaf, path->slots[0],
1609                struct btrfs_file_extent_item);
1610    btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1611    btrfs_set_file_extent_type(leaf, fi, extent_type);
1612    btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1613    btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1614    btrfs_set_file_extent_offset(leaf, fi, 0);
1615    btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1616    btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1617    btrfs_set_file_extent_compression(leaf, fi, compression);
1618    btrfs_set_file_extent_encryption(leaf, fi, encryption);
1619    btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1620
1621    btrfs_unlock_up_safe(path, 1);
1622    btrfs_set_lock_blocking(leaf);
1623
1624    btrfs_mark_buffer_dirty(leaf);
1625
1626    inode_add_bytes(inode, num_bytes);
1627
1628    ins.objectid = disk_bytenr;
1629    ins.offset = disk_num_bytes;
1630    ins.type = BTRFS_EXTENT_ITEM_KEY;
1631    ret = btrfs_alloc_reserved_file_extent(trans, root,
1632                    root->root_key.objectid,
1633                    inode->i_ino, file_pos, &ins);
1634    BUG_ON(ret);
1635    btrfs_free_path(path);
1636
1637    return 0;
1638}
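
/*
 * Hedged example of what this helper ends up recording for a single 4K
 * ordered extent at file offset 0 backed by disk bytenr 12582912 (the
 * numbers are made up purely for illustration):
 *
 *	fs tree:     key (inode->i_ino, BTRFS_EXTENT_DATA_KEY, 0)
 *	             -> file_extent_item { disk_bytenr 12582912, num_bytes 4096 }
 *	extent tree: key (12582912, BTRFS_EXTENT_ITEM_KEY, 4096)
 *	             with a backref to (root objectid, inode->i_ino, 0)
 */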
1639
1640/*
1641 * as ordered data IO finishes, this gets called so we can finish an
1642 * ordered extent if the range of bytes in the file it covers has been
1643 * fully written.
1644 */
1650static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1651{
1652    struct btrfs_root *root = BTRFS_I(inode)->root;
1653    struct btrfs_trans_handle *trans;
1654    struct btrfs_ordered_extent *ordered_extent = NULL;
1655    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1656    struct extent_state *cached_state = NULL;
1657    int compressed = 0;
1658    int ret;
1659
1660    ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1661                         end - start + 1);
1662    if (!ret)
1663        return 0;
1664    BUG_ON(!ordered_extent);
1665
1666    if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1667        BUG_ON(!list_empty(&ordered_extent->list));
1668        ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1669        if (!ret) {
1670            trans = btrfs_join_transaction(root, 1);
1671            ret = btrfs_update_inode(trans, root, inode);
1672            BUG_ON(ret);
1673            btrfs_end_transaction(trans, root);
1674        }
1675        goto out;
1676    }
1677
1678    lock_extent_bits(io_tree, ordered_extent->file_offset,
1679             ordered_extent->file_offset + ordered_extent->len - 1,
1680             0, &cached_state, GFP_NOFS);
1681
1682    trans = btrfs_join_transaction(root, 1);
1683
1684    if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1685        compressed = 1;
1686    if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1687        BUG_ON(compressed);
1688        ret = btrfs_mark_extent_written(trans, inode,
1689                        ordered_extent->file_offset,
1690                        ordered_extent->file_offset +
1691                        ordered_extent->len);
1692        BUG_ON(ret);
1693    } else {
1694        ret = insert_reserved_file_extent(trans, inode,
1695                        ordered_extent->file_offset,
1696                        ordered_extent->start,
1697                        ordered_extent->disk_len,
1698                        ordered_extent->len,
1699                        ordered_extent->len,
1700                        compressed, 0, 0,
1701                        BTRFS_FILE_EXTENT_REG);
1702        unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1703                   ordered_extent->file_offset,
1704                   ordered_extent->len);
1705        BUG_ON(ret);
1706    }
1707    unlock_extent_cached(io_tree, ordered_extent->file_offset,
1708                 ordered_extent->file_offset +
1709                 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1710
1711    add_pending_csums(trans, inode, ordered_extent->file_offset,
1712              &ordered_extent->list);
1713
1714    /* this also removes the ordered extent from the tree */
1715    btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1716    ret = btrfs_update_inode(trans, root, inode);
1717    BUG_ON(ret);
1718    btrfs_end_transaction(trans, root);
1719out:
1720    /* once for us */
1721    btrfs_put_ordered_extent(ordered_extent);
1722    /* once for the tree */
1723    btrfs_put_ordered_extent(ordered_extent);
1724
1725    return 0;
1726}
1727
1728static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1729                struct extent_state *state, int uptodate)
1730{
1731    ClearPagePrivate2(page);
1732    return btrfs_finish_ordered_io(page->mapping->host, start, end);
1733}
1734
1735/*
1736 * When IO fails, either with EIO or csum verification fails, we
1737 * try other mirrors that might have a good copy of the data. This
1738 * io_failure_record is used to record state as we go through all the
1739 * mirrors. If another mirror has good data, the page is set up to date
1740 * and things continue. If a good mirror can't be found, the original
1741 * bio end_io callback is called to indicate things have failed.
1742 */
1743struct io_failure_record {
1744    struct page *page;
1745    u64 start;
1746    u64 len;
1747    u64 logical;
1748    unsigned long bio_flags;
1749    int last_mirror;
1750};
1751
1752static int btrfs_io_failed_hook(struct bio *failed_bio,
1753             struct page *page, u64 start, u64 end,
1754             struct extent_state *state)
1755{
1756    struct io_failure_record *failrec = NULL;
1757    u64 private;
1758    struct extent_map *em;
1759    struct inode *inode = page->mapping->host;
1760    struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1761    struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1762    struct bio *bio;
1763    int num_copies;
1764    int ret;
1765    int rw;
1766    u64 logical;
1767
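    /*
     * The failure tree stashes a pointer to the io_failure_record in the
     * per-range private field, so a hit here means this byte range has
     * already failed at least once and we can reuse the existing record.
     */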
1768    ret = get_state_private(failure_tree, start, &private);
1769    if (ret) {
1770        failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1771        if (!failrec)
1772            return -ENOMEM;
1773        failrec->start = start;
1774        failrec->len = end - start + 1;
1775        failrec->last_mirror = 0;
1776        failrec->bio_flags = 0;
1777
1778        read_lock(&em_tree->lock);
1779        em = lookup_extent_mapping(em_tree, start, failrec->len);
1780        if (em && (em->start > start || em->start + em->len < start)) {
1781            free_extent_map(em);
1782            em = NULL;
1783        }
1784        read_unlock(&em_tree->lock);
1785
1786        if (!em || IS_ERR(em)) {
1787            kfree(failrec);
1788            return -EIO;
1789        }
1790        logical = start - em->start;
1791        logical = em->block_start + logical;
1792        if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1793            logical = em->block_start;
1794            failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1795        }
1796        failrec->logical = logical;
1797        free_extent_map(em);
1798        set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1799                EXTENT_DIRTY, GFP_NOFS);
1800        set_state_private(failure_tree, start,
1801                 (u64)(unsigned long)failrec);
1802    } else {
1803        failrec = (struct io_failure_record *)(unsigned long)private;
1804    }
1805    num_copies = btrfs_num_copies(
1806                  &BTRFS_I(inode)->root->fs_info->mapping_tree,
1807                  failrec->logical, failrec->len);
1808    failrec->last_mirror++;
1809    if (!state) {
1810        spin_lock(&BTRFS_I(inode)->io_tree.lock);
1811        state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1812                            failrec->start,
1813                            EXTENT_LOCKED);
1814        if (state && state->start != failrec->start)
1815            state = NULL;
1816        spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1817    }
1818    if (!state || failrec->last_mirror > num_copies) {
1819        set_state_private(failure_tree, failrec->start, 0);
1820        clear_extent_bits(failure_tree, failrec->start,
1821                  failrec->start + failrec->len - 1,
1822                  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1823        kfree(failrec);
1824        return -EIO;
1825    }
1826    bio = bio_alloc(GFP_NOFS, 1);
1827    bio->bi_private = state;
1828    bio->bi_end_io = failed_bio->bi_end_io;
1829    bio->bi_sector = failrec->logical >> 9;
1830    bio->bi_bdev = failed_bio->bi_bdev;
1831    bio->bi_size = 0;
1832
1833    bio_add_page(bio, page, failrec->len, start - page_offset(page));
1834    if (failed_bio->bi_rw & (1 << BIO_RW))
1835        rw = WRITE;
1836    else
1837        rw = READ;
1838
1839    BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1840                              failrec->last_mirror,
1841                              failrec->bio_flags);
1842    return 0;
1843}
1844
1845/*
1846 * each time an IO finishes, we do a fast check in the IO failure tree
1847 * to see if we need to process or clean up an io_failure_record
1848 */
1849static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1850{
1851    u64 private;
1852    u64 private_failure;
1853    struct io_failure_record *failure;
1854    int ret;
1855
1856    private = 0;
1857    if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1858                 (u64)-1, 1, EXTENT_DIRTY)) {
1859        ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1860                    start, &private_failure);
1861        if (ret == 0) {
1862            failure = (struct io_failure_record *)(unsigned long)
1863                   private_failure;
1864            set_state_private(&BTRFS_I(inode)->io_failure_tree,
1865                      failure->start, 0);
1866            clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1867                      failure->start,
1868                      failure->start + failure->len - 1,
1869                      EXTENT_DIRTY | EXTENT_LOCKED,
1870                      GFP_NOFS);
1871            kfree(failure);
1872        }
1873    }
1874    return 0;
1875}
1876
1877/*
1878 * when reads are done, we need to check csums to verify the data is correct.
1879 * If there's a match, we allow the bio to finish. If not, we go through
1880 * the io_failure_record routines to find good copies
1881 */
1882static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1883                   struct extent_state *state)
1884{
1885    size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1886    struct inode *inode = page->mapping->host;
1887    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1888    char *kaddr;
1889    u64 private = ~(u32)0;
1890    int ret;
1891    struct btrfs_root *root = BTRFS_I(inode)->root;
1892    u32 csum = ~(u32)0;
1893
1894    if (PageChecked(page)) {
1895        ClearPageChecked(page);
1896        goto good;
1897    }
1898
1899    if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1900        return 0;
1901
1902    if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1903        test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1904        clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1905                  GFP_NOFS);
1906        return 0;
1907    }
1908
1909    if (state && state->start == start) {
1910        private = state->private;
1911        ret = 0;
1912    } else {
1913        ret = get_state_private(io_tree, start, &private);
1914    }
1915    kaddr = kmap_atomic(page, KM_USER0);
1916    if (ret)
1917        goto zeroit;
1918
1919    csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1920    btrfs_csum_final(csum, (char *)&csum);
1921    if (csum != private)
1922        goto zeroit;
1923
1924    kunmap_atomic(kaddr, KM_USER0);
1925good:
1926    /* if the io failure tree for this inode is non-empty,
1927     * check to see if we've recovered from a failed IO
1928     */
1929    btrfs_clean_io_failures(inode, start);
1930    return 0;
1931
1932zeroit:
1933    if (printk_ratelimit()) {
1934        printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1935               "private %llu\n", page->mapping->host->i_ino,
1936               (unsigned long long)start, csum,
1937               (unsigned long long)private);
1938    }
1939    memset(kaddr + offset, 1, end - start + 1);
1940    flush_dcache_page(page);
1941    kunmap_atomic(kaddr, KM_USER0);
1942    if (private == 0)
1943        return 0;
1944    return -EIO;
1945}
1946
1947struct delayed_iput {
1948    struct list_head list;
1949    struct inode *inode;
1950};
1951
1952void btrfs_add_delayed_iput(struct inode *inode)
1953{
1954    struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1955    struct delayed_iput *delayed;
1956
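    /*
     * Drop our reference unless it is the last one.  If this would be the
     * final iput, keep the reference and queue the inode instead so the
     * eviction work runs later from btrfs_run_delayed_iputs.
     */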
1957    if (atomic_add_unless(&inode->i_count, -1, 1))
1958        return;
1959
1960    delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
1961    delayed->inode = inode;
1962
1963    spin_lock(&fs_info->delayed_iput_lock);
1964    list_add_tail(&delayed->list, &fs_info->delayed_iputs);
1965    spin_unlock(&fs_info->delayed_iput_lock);
1966}
1967
1968void btrfs_run_delayed_iputs(struct btrfs_root *root)
1969{
1970    LIST_HEAD(list);
1971    struct btrfs_fs_info *fs_info = root->fs_info;
1972    struct delayed_iput *delayed;
1973    int empty;
1974
1975    spin_lock(&fs_info->delayed_iput_lock);
1976    empty = list_empty(&fs_info->delayed_iputs);
1977    spin_unlock(&fs_info->delayed_iput_lock);
1978    if (empty)
1979        return;
1980
1981    down_read(&root->fs_info->cleanup_work_sem);
1982    spin_lock(&fs_info->delayed_iput_lock);
1983    list_splice_init(&fs_info->delayed_iputs, &list);
1984    spin_unlock(&fs_info->delayed_iput_lock);
1985
1986    while (!list_empty(&list)) {
1987        delayed = list_entry(list.next, struct delayed_iput, list);
1988        list_del(&delayed->list);
1989        iput(delayed->inode);
1990        kfree(delayed);
1991    }
1992    up_read(&root->fs_info->cleanup_work_sem);
1993}
1994
1995/*
1996 * This creates an orphan entry for the given inode in case something goes
1997 * wrong in the middle of an unlink/truncate.
1998 */
1999int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2000{
2001    struct btrfs_root *root = BTRFS_I(inode)->root;
2002    int ret = 0;
2003
2004    spin_lock(&root->list_lock);
2005
2006    /* already on the orphan list, we're good */
2007    if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2008        spin_unlock(&root->list_lock);
2009        return 0;
2010    }
2011
2012    list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2013
2014    spin_unlock(&root->list_lock);
2015
2016    /*
2017     * insert an orphan item to track this unlinked/truncated file
2018     */
2019    ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2020
2021    return ret;
2022}
2023
2024/*
2025 * We have done the truncate/delete so we can go ahead and remove the orphan
2026 * item for this particular inode.
2027 */
2028int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2029{
2030    struct btrfs_root *root = BTRFS_I(inode)->root;
2031    int ret = 0;
2032
2033    spin_lock(&root->list_lock);
2034
2035    if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2036        spin_unlock(&root->list_lock);
2037        return 0;
2038    }
2039
2040    list_del_init(&BTRFS_I(inode)->i_orphan);
2041    if (!trans) {
2042        spin_unlock(&root->list_lock);
2043        return 0;
2044    }
2045
2046    spin_unlock(&root->list_lock);
2047
2048    ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2049
2050    return ret;
2051}
2052
2053/*
2054 * this cleans up any orphans that may be left on the list from the last use
2055 * of this root.
2056 */
2057void btrfs_orphan_cleanup(struct btrfs_root *root)
2058{
2059    struct btrfs_path *path;
2060    struct extent_buffer *leaf;
2061    struct btrfs_item *item;
2062    struct btrfs_key key, found_key;
2063    struct btrfs_trans_handle *trans;
2064    struct inode *inode;
2065    int ret = 0, nr_unlink = 0, nr_truncate = 0;
2066
2067    if (!xchg(&root->clean_orphans, 0))
2068        return;
2069
2070    path = btrfs_alloc_path();
2071    BUG_ON(!path);
2072    path->reada = -1;
2073
2074    key.objectid = BTRFS_ORPHAN_OBJECTID;
2075    btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2076    key.offset = (u64)-1;
2077
2078    while (1) {
2079        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2080        if (ret < 0) {
2081            printk(KERN_ERR "Error searching slot for orphan: %d"
2082                   "\n", ret);
2083            break;
2084        }
2085
2086        /*
2087         * if ret == 0 it means we found what we were searching for, which
2088         * is weird but possible, so only adjust the path if we didn't
2089         * find the key, and check whether we have something that matches
2090         */
2091        if (ret > 0) {
2092            if (path->slots[0] == 0)
2093                break;
2094            path->slots[0]--;
2095        }
2096
2097        /* pull out the item */
2098        leaf = path->nodes[0];
2099        item = btrfs_item_nr(leaf, path->slots[0]);
2100        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2101
2102        /* make sure the item matches what we want */
2103        if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2104            break;
2105        if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2106            break;
2107
2108        /* release the path since we're done with it */
2109        btrfs_release_path(root, path);
2110
2111        /*
2112         * this is where we are basically btrfs_lookup, without the
2113         * crossing root thing. we store the inode number in the
2114         * offset of the orphan item.
2115         */
2116        found_key.objectid = found_key.offset;
2117        found_key.type = BTRFS_INODE_ITEM_KEY;
2118        found_key.offset = 0;
2119        inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2120        if (IS_ERR(inode))
2121            break;
2122
2123        /*
2124         * add this inode to the orphan list so btrfs_orphan_del does
2125         * the proper thing when we hit it
2126         */
2127        spin_lock(&root->list_lock);
2128        list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2129        spin_unlock(&root->list_lock);
2130
2131        /*
2132         * if this is a bad inode, it means we actually succeeded in
2133         * removing the inode, but not the orphan record, which means
2134         * we need to manually delete the orphan since iput will just
2135         * do a destroy_inode
2136         */
2137        if (is_bad_inode(inode)) {
2138            trans = btrfs_start_transaction(root, 1);
2139            btrfs_orphan_del(trans, inode);
2140            btrfs_end_transaction(trans, root);
2141            iput(inode);
2142            continue;
2143        }
2144
2145        /* if we have links, this was a truncate, let's do that */
2146        if (inode->i_nlink) {
2147            nr_truncate++;
2148            btrfs_truncate(inode);
2149        } else {
2150            nr_unlink++;
2151        }
2152
2153        /* this will do delete_inode and everything for us */
2154        iput(inode);
2155    }
2156
2157    if (nr_unlink)
2158        printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2159    if (nr_truncate)
2160        printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2161
2162    btrfs_free_path(path);
2163}
2164
2165/*
2166 * very simple check to peek ahead in the leaf looking for xattrs. If we
2167 * don't find any xattrs, we know there can't be any acls.
2168 *
2169 * slot is the slot the inode is in, objectid is the objectid of the inode
2170 */
2171static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2172                      int slot, u64 objectid)
2173{
2174    u32 nritems = btrfs_header_nritems(leaf);
2175    struct btrfs_key found_key;
2176    int scanned = 0;
2177
2178    slot++;
2179    while (slot < nritems) {
2180        btrfs_item_key_to_cpu(leaf, &found_key, slot);
2181
2182        /* we found a different objectid, there must not be acls */
2183        if (found_key.objectid != objectid)
2184            return 0;
2185
2186        /* we found an xattr, assume we've got an acl */
2187        if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2188            return 1;
2189
2190        /*
2191         * we found a key greater than an xattr key, there can't
2192         * be any acls later on
2193         */
2194        if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2195            return 0;
2196
2197        slot++;
2198        scanned++;
2199
2200        /*
2201         * it goes inode, inode backrefs, xattrs, extents,
2202         * so if there are a ton of hard links to an inode there can
2203         * be a lot of backrefs. Don't waste time searching too hard,
2204         * this is just an optimization
2205         */
2206        if (scanned >= 8)
2207            break;
2208    }
2209    /* we hit the end of the leaf before we found an xattr or
2210     * something larger than an xattr. We have to assume the inode
2211     * has acls
2212     */
2213    return 1;
2214}
2215
2216/*
2217 * read an inode from the btree into the in-memory inode
2218 */
2219static void btrfs_read_locked_inode(struct inode *inode)
2220{
2221    struct btrfs_path *path;
2222    struct extent_buffer *leaf;
2223    struct btrfs_inode_item *inode_item;
2224    struct btrfs_timespec *tspec;
2225    struct btrfs_root *root = BTRFS_I(inode)->root;
2226    struct btrfs_key location;
2227    int maybe_acls;
2228    u64 alloc_group_block;
2229    u32 rdev;
2230    int ret;
2231
2232    path = btrfs_alloc_path();
2233    BUG_ON(!path);
2234    memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2235
2236    ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2237    if (ret)
2238        goto make_bad;
2239
2240    leaf = path->nodes[0];
2241    inode_item = btrfs_item_ptr(leaf, path->slots[0],
2242                    struct btrfs_inode_item);
2243
2244    inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2245    inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2246    inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2247    inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2248    btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2249
2250    tspec = btrfs_inode_atime(inode_item);
2251    inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2252    inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2253
2254    tspec = btrfs_inode_mtime(inode_item);
2255    inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2256    inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2257
2258    tspec = btrfs_inode_ctime(inode_item);
2259    inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2260    inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2261
2262    inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2263    BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2264    BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2265    inode->i_generation = BTRFS_I(inode)->generation;
2266    inode->i_rdev = 0;
2267    rdev = btrfs_inode_rdev(leaf, inode_item);
2268
2269    BTRFS_I(inode)->index_cnt = (u64)-1;
2270    BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2271
2272    alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2273
2274    /*
2275     * try to precache a NULL acl entry for files that don't have
2276     * any xattrs or acls
2277     */
2278    maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2279    if (!maybe_acls)
2280        cache_no_acl(inode);
2281
2282    BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2283                        alloc_group_block, 0);
2284    btrfs_free_path(path);
2285    inode_item = NULL;
2286
2287    switch (inode->i_mode & S_IFMT) {
2288    case S_IFREG:
2289        inode->i_mapping->a_ops = &btrfs_aops;
2290        inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2291        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2292        inode->i_fop = &btrfs_file_operations;
2293        inode->i_op = &btrfs_file_inode_operations;
2294        break;
2295    case S_IFDIR:
2296        inode->i_fop = &btrfs_dir_file_operations;
2297        if (root == root->fs_info->tree_root)
2298            inode->i_op = &btrfs_dir_ro_inode_operations;
2299        else
2300            inode->i_op = &btrfs_dir_inode_operations;
2301        break;
2302    case S_IFLNK:
2303        inode->i_op = &btrfs_symlink_inode_operations;
2304        inode->i_mapping->a_ops = &btrfs_symlink_aops;
2305        inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2306        break;
2307    default:
2308        inode->i_op = &btrfs_special_inode_operations;
2309        init_special_inode(inode, inode->i_mode, rdev);
2310        break;
2311    }
2312
2313    btrfs_update_iflags(inode);
2314    return;
2315
2316make_bad:
2317    btrfs_free_path(path);
2318    make_bad_inode(inode);
2319}
2320
2321/*
2322 * given a leaf and an inode, copy the inode fields into the leaf
2323 */
2324static void fill_inode_item(struct btrfs_trans_handle *trans,
2325                struct extent_buffer *leaf,
2326                struct btrfs_inode_item *item,
2327                struct inode *inode)
2328{
2329    btrfs_set_inode_uid(leaf, item, inode->i_uid);
2330    btrfs_set_inode_gid(leaf, item, inode->i_gid);
2331    btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2332    btrfs_set_inode_mode(leaf, item, inode->i_mode);
2333    btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2334
2335    btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2336                   inode->i_atime.tv_sec);
2337    btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2338                inode->i_atime.tv_nsec);
2339
2340    btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2341                   inode->i_mtime.tv_sec);
2342    btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2343                inode->i_mtime.tv_nsec);
2344
2345    btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2346                   inode->i_ctime.tv_sec);
2347    btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2348                inode->i_ctime.tv_nsec);
2349
2350    btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2351    btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2352    btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2353    btrfs_set_inode_transid(leaf, item, trans->transid);
2354    btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2355    btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2356    btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2357}
2358
2359/*
2360 * copy everything in the in-memory inode into the btree.
2361 */
2362noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2363                struct btrfs_root *root, struct inode *inode)
2364{
2365    struct btrfs_inode_item *inode_item;
2366    struct btrfs_path *path;
2367    struct extent_buffer *leaf;
2368    int ret;
2369
2370    path = btrfs_alloc_path();
2371    BUG_ON(!path);
2372    path->leave_spinning = 1;
2373    ret = btrfs_lookup_inode(trans, root, path,
2374                 &BTRFS_I(inode)->location, 1);
2375    if (ret) {
2376        if (ret > 0)
2377            ret = -ENOENT;
2378        goto failed;
2379    }
2380
2381    btrfs_unlock_up_safe(path, 1);
2382    leaf = path->nodes[0];
2383    inode_item = btrfs_item_ptr(leaf, path->slots[0],
2384                  struct btrfs_inode_item);
2385
2386    fill_inode_item(trans, leaf, inode_item, inode);
2387    btrfs_mark_buffer_dirty(leaf);
2388    btrfs_set_inode_last_trans(trans, inode);
2389    ret = 0;
2390failed:
2391    btrfs_free_path(path);
2392    return ret;
2393}
2394
2395
2396/*
2397 * unlink helper that gets used here in inode.c and in the tree logging
2398 * recovery code. It removes a link in a directory with a given name, and
2399 * also drops the back refs in the inode to the directory
2400 */
2401int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2402               struct btrfs_root *root,
2403               struct inode *dir, struct inode *inode,
2404               const char *name, int name_len)
2405{
2406    struct btrfs_path *path;
2407    int ret = 0;
2408    struct extent_buffer *leaf;
2409    struct btrfs_dir_item *di;
2410    struct btrfs_key key;
2411    u64 index;
2412
2413    path = btrfs_alloc_path();
2414    if (!path) {
2415        ret = -ENOMEM;
2416        goto err;
2417    }
2418
2419    path->leave_spinning = 1;
2420    di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2421                    name, name_len, -1);
2422    if (IS_ERR(di)) {
2423        ret = PTR_ERR(di);
2424        goto err;
2425    }
2426    if (!di) {
2427        ret = -ENOENT;
2428        goto err;
2429    }
2430    leaf = path->nodes[0];
2431    btrfs_dir_item_key_to_cpu(leaf, di, &key);
2432    ret = btrfs_delete_one_dir_name(trans, root, path, di);
2433    if (ret)
2434        goto err;
2435    btrfs_release_path(root, path);
2436
2437    ret = btrfs_del_inode_ref(trans, root, name, name_len,
2438                  inode->i_ino,
2439                  dir->i_ino, &index);
2440    if (ret) {
2441        printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2442               "inode %lu parent %lu\n", name_len, name,
2443               inode->i_ino, dir->i_ino);
2444        goto err;
2445    }
2446
2447    di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2448                     index, name, name_len, -1);
2449    if (IS_ERR(di)) {
2450        ret = PTR_ERR(di);
2451        goto err;
2452    }
2453    if (!di) {
2454        ret = -ENOENT;
2455        goto err;
2456    }
2457    ret = btrfs_delete_one_dir_name(trans, root, path, di);
2458    btrfs_release_path(root, path);
2459
2460    ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2461                     inode, dir->i_ino);
2462    BUG_ON(ret != 0 && ret != -ENOENT);
2463
2464    ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2465                       dir, index);
2466    BUG_ON(ret);
2467err:
2468    btrfs_free_path(path);
2469    if (ret)
2470        goto out;
2471
2472    btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2473    inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2474    btrfs_update_inode(trans, root, dir);
2475    btrfs_drop_nlink(inode);
2476    ret = btrfs_update_inode(trans, root, inode);
2477out:
2478    return ret;
2479}
2480
2481static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2482{
2483    struct btrfs_root *root;
2484    struct btrfs_trans_handle *trans;
2485    struct inode *inode = dentry->d_inode;
2486    int ret;
2487    unsigned long nr = 0;
2488
2489    root = BTRFS_I(dir)->root;
2490
2491    /*
2492     * 5 items for unlink inode
2493     * 1 for orphan
2494     */
2495    ret = btrfs_reserve_metadata_space(root, 6);
2496    if (ret)
2497        return ret;
2498
2499    trans = btrfs_start_transaction(root, 1);
2500    if (IS_ERR(trans)) {
2501        btrfs_unreserve_metadata_space(root, 6);
2502        return PTR_ERR(trans);
2503    }
2504
2505    btrfs_set_trans_block_group(trans, dir);
2506
2507    btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2508
2509    ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2510                 dentry->d_name.name, dentry->d_name.len);
2511
2512    if (inode->i_nlink == 0)
2513        ret = btrfs_orphan_add(trans, inode);
2514
2515    nr = trans->blocks_used;
2516
2517    btrfs_end_transaction_throttle(trans, root);
2518    btrfs_unreserve_metadata_space(root, 6);
2519    btrfs_btree_balance_dirty(root, nr);
2520    return ret;
2521}
2522
2523int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2524            struct btrfs_root *root,
2525            struct inode *dir, u64 objectid,
2526            const char *name, int name_len)
2527{
2528    struct btrfs_path *path;
2529    struct extent_buffer *leaf;
2530    struct btrfs_dir_item *di;
2531    struct btrfs_key key;
2532    u64 index;
2533    int ret;
2534
2535    path = btrfs_alloc_path();
2536    if (!path)
2537        return -ENOMEM;
2538
2539    di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2540                   name, name_len, -1);
2541    BUG_ON(!di || IS_ERR(di));
2542
2543    leaf = path->nodes[0];
2544    btrfs_dir_item_key_to_cpu(leaf, di, &key);
2545    WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2546    ret = btrfs_delete_one_dir_name(trans, root, path, di);
2547    BUG_ON(ret);
2548    btrfs_release_path(root, path);
2549
2550    ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2551                 objectid, root->root_key.objectid,
2552                 dir->i_ino, &index, name, name_len);
2553    if (ret < 0) {
2554        BUG_ON(ret != -ENOENT);
2555        di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2556                         name, name_len);
2557        BUG_ON(!di || IS_ERR(di));
2558
2559        leaf = path->nodes[0];
2560        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2561        btrfs_release_path(root, path);
2562        index = key.offset;
2563    }
2564
2565    di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2566                     index, name, name_len, -1);
2567    BUG_ON(!di || IS_ERR(di));
2568
2569    leaf = path->nodes[0];
2570    btrfs_dir_item_key_to_cpu(leaf, di, &key);
2571    WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2572    ret = btrfs_delete_one_dir_name(trans, root, path, di);
2573    BUG_ON(ret);
2574    btrfs_release_path(root, path);
2575
2576    btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2577    dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2578    ret = btrfs_update_inode(trans, root, dir);
2579    BUG_ON(ret);
2580    dir->i_sb->s_dirt = 1;
2581
2582    btrfs_free_path(path);
2583    return 0;
2584}
2585
2586static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2587{
2588    struct inode *inode = dentry->d_inode;
2589    int err = 0;
2590    int ret;
2591    struct btrfs_root *root = BTRFS_I(dir)->root;
2592    struct btrfs_trans_handle *trans;
2593    unsigned long nr = 0;
2594
2595    if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2596        inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2597        return -ENOTEMPTY;
2598
2599    ret = btrfs_reserve_metadata_space(root, 5);
2600    if (ret)
2601        return ret;
2602
2603    trans = btrfs_start_transaction(root, 1);
2604    if (IS_ERR(trans)) {
2605        btrfs_unreserve_metadata_space(root, 5);
2606        return PTR_ERR(trans);
2607    }
2608
2609    btrfs_set_trans_block_group(trans, dir);
2610
2611    if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2612        err = btrfs_unlink_subvol(trans, root, dir,
2613                      BTRFS_I(inode)->location.objectid,
2614                      dentry->d_name.name,
2615                      dentry->d_name.len);
2616        goto out;
2617    }
2618
2619    err = btrfs_orphan_add(trans, inode);
2620    if (err)
2621        goto out;
2622
2623    /* now the directory is empty */
2624    err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2625                 dentry->d_name.name, dentry->d_name.len);
2626    if (!err)
2627        btrfs_i_size_write(inode, 0);
2628out:
2629    nr = trans->blocks_used;
2630    ret = btrfs_end_transaction_throttle(trans, root);
2631    btrfs_unreserve_metadata_space(root, 5);
2632    btrfs_btree_balance_dirty(root, nr);
2633
2634    if (ret && !err)
2635        err = ret;
2636    return err;
2637}
2638
2639#if 0
2640/*
2641 * when truncating bytes in a file, it is possible to avoid reading
2642 * the leaves that contain only checksum items. This can be the
2643 * majority of the IO required to delete a large file, but it must
2644 * be done carefully.
2645 *
2646 * The keys in the level just above the leaves are checked to make sure
2647 * the lowest key in a given leaf is a csum key, and starts at an offset
2648 * after the new size.
2649 *
2650 * Then the key for the next leaf is checked to make sure it also has
2651 * a checksum item for the same file. If it does, we know our target leaf
2652 * contains only checksum items, and it can be safely freed without reading
2653 * it.
2654 *
2655 * This is just an optimization targeted at large files. It may do
2656 * nothing. It will return 0 unless things went badly.
2657 */
2658static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2659                     struct btrfs_root *root,
2660                     struct btrfs_path *path,
2661                     struct inode *inode, u64 new_size)
2662{
2663    struct btrfs_key key;
2664    int ret;
2665    int nritems;
2666    struct btrfs_key found_key;
2667    struct btrfs_key other_key;
2668    struct btrfs_leaf_ref *ref;
2669    u64 leaf_gen;
2670    u64 leaf_start;
2671
2672    path->lowest_level = 1;
2673    key.objectid = inode->i_ino;
2674    key.type = BTRFS_CSUM_ITEM_KEY;
2675    key.offset = new_size;
2676again:
2677    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2678    if (ret < 0)
2679        goto out;
2680
2681    if (path->nodes[1] == NULL) {
2682        ret = 0;
2683        goto out;
2684    }
2685    ret = 0;
2686    btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2687    nritems = btrfs_header_nritems(path->nodes[1]);
2688
2689    if (!nritems)
2690        goto out;
2691
2692    if (path->slots[1] >= nritems)
2693        goto next_node;
2694
2695    /* did we find a key greater than anything we want to delete? */
2696    if (found_key.objectid > inode->i_ino ||
2697       (found_key.objectid == inode->i_ino && found_key.type > key.type))
2698        goto out;
2699
2700    /* we check the next key in the node to make sure the leaf contains
2701     * only checksum items. This comparison doesn't work if our
2702     * leaf is the last one in the node
2703     */
2704    if (path->slots[1] + 1 >= nritems) {
2705next_node:
2706        /* search forward from the last key in the node, this
2707         * will bring us into the next node in the tree
2708         */
2709        btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2710
2711        /* unlikely, but we inc below, so check to be safe */
2712        if (found_key.offset == (u64)-1)
2713            goto out;
2714
2715        /* search_forward needs a path with locks held, do the
2716         * search again for the original key. It is possible
2717         * this will race with a balance and return a path that
2718         * we could modify, but this drop is just an optimization
2719         * and is allowed to miss some leaves.
2720         */
2721        btrfs_release_path(root, path);
2722        found_key.offset++;
2723
2724        /* setup a max key for search_forward */
2725        other_key.offset = (u64)-1;
2726        other_key.type = key.type;
2727        other_key.objectid = key.objectid;
2728
2729        path->keep_locks = 1;
2730        ret = btrfs_search_forward(root, &found_key, &other_key,
2731                       path, 0, 0);
2732        path->keep_locks = 0;
2733        if (ret || found_key.objectid != key.objectid ||
2734            found_key.type != key.type) {
2735            ret = 0;
2736            goto out;
2737        }
2738
2739        key.offset = found_key.offset;
2740        btrfs_release_path(root, path);
2741        cond_resched();
2742        goto again;
2743    }
2744
2745    /* we know there's one more slot after us in the tree,
2746     * read that key so we can verify it is also a checksum item
2747     */
2748    btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2749
2750    if (found_key.objectid < inode->i_ino)
2751        goto next_key;
2752
2753    if (found_key.type != key.type || found_key.offset < new_size)
2754        goto next_key;
2755
2756    /*
2757     * if the key for the next leaf isn't a csum key from this objectid,
2758     * we can't be sure there aren't good items inside this leaf.
2759     * Bail out
2760     */
2761    if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2762        goto out;
2763
2764    leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2765    leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2766    /*
2767     * it is safe to delete this leaf, it contains only
2768     * csum items from this inode at an offset >= new_size
2769     */
2770    ret = btrfs_del_leaf(trans, root, path, leaf_start);
2771    BUG_ON(ret);
2772
2773    if (root->ref_cows && leaf_gen < trans->transid) {
2774        ref = btrfs_alloc_leaf_ref(root, 0);
2775        if (ref) {
2776            ref->root_gen = root->root_key.offset;
2777            ref->bytenr = leaf_start;
2778            ref->owner = 0;
2779            ref->generation = leaf_gen;
2780            ref->nritems = 0;
2781
2782            btrfs_sort_leaf_ref(ref);
2783
2784            ret = btrfs_add_leaf_ref(root, ref, 0);
2785            WARN_ON(ret);
2786            btrfs_free_leaf_ref(root, ref);
2787        } else {
2788            WARN_ON(1);
2789        }
2790    }
2791next_key:
2792    btrfs_release_path(root, path);
2793
2794    if (other_key.objectid == inode->i_ino &&
2795        other_key.type == key.type && other_key.offset > key.offset) {
2796        key.offset = other_key.offset;
2797        cond_resched();
2798        goto again;
2799    }
2800    ret = 0;
2801out:
2802    /* fixup any changes we've made to the path */
2803    path->lowest_level = 0;
2804    path->keep_locks = 0;
2805    btrfs_release_path(root, path);
2806    return ret;
2807}
2808
2809#endif
2810
2811/*
2812 * this can truncate away extent items, csum items and directory items.
2813 * It starts at a high offset and removes keys until it can't find
2814 * any higher than new_size
2815 *
2816 * csum items that cross the new i_size are truncated to the new size
2817 * as well.
2818 *
2819 * min_type is the minimum key type to truncate down to. If set to 0, this
2820 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2821 */
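/*
 * For example: new_size == 0 with min_type == 0 removes every item belonging
 * to the inode, including the inode item itself, while min_type ==
 * BTRFS_EXTENT_DATA_KEY drops data items past new_size but leaves the inode
 * item alone; a non-zero new_size with any other min_type trips the BUG_ON
 * below.
 */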
2822int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2823                   struct btrfs_root *root,
2824                   struct inode *inode,
2825                   u64 new_size, u32 min_type)
2826{
2827    struct btrfs_path *path;
2828    struct extent_buffer *leaf;
2829    struct btrfs_file_extent_item *fi;
2830    struct btrfs_key key;
2831    struct btrfs_key found_key;
2832    u64 extent_start = 0;
2833    u64 extent_num_bytes = 0;
2834    u64 extent_offset = 0;
2835    u64 item_end = 0;
2836    u64 mask = root->sectorsize - 1;
2837    u32 found_type = (u8)-1;
2838    int found_extent;
2839    int del_item;
2840    int pending_del_nr = 0;
2841    int pending_del_slot = 0;
2842    int extent_type = -1;
2843    int encoding;
2844    int ret;
2845    int err = 0;
2846
2847    BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
2848
2849    if (root->ref_cows)
2850        btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2851
2852    path = btrfs_alloc_path();
2853    BUG_ON(!path);
2854    path->reada = -1;
2855
2856    key.objectid = inode->i_ino;
2857    key.offset = (u64)-1;
2858    key.type = (u8)-1;
2859
2860search_again:
2861    path->leave_spinning = 1;
2862    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2863    if (ret < 0) {
2864        err = ret;
2865        goto out;
2866    }
2867
2868    if (ret > 0) {
2869        /* there are no items in the tree for us to truncate, we're
2870         * done
2871         */
2872        if (path->slots[0] == 0)
2873            goto out;
2874        path->slots[0]--;
2875    }
2876
2877    while (1) {
2878        fi = NULL;
2879        leaf = path->nodes[0];
2880        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2881        found_type = btrfs_key_type(&found_key);
2882        encoding = 0;
2883
2884        if (found_key.objectid != inode->i_ino)
2885            break;
2886
2887        if (found_type < min_type)
2888            break;
2889
2890        item_end = found_key.offset;
2891        if (found_type == BTRFS_EXTENT_DATA_KEY) {
2892            fi = btrfs_item_ptr(leaf, path->slots[0],
2893                        struct btrfs_file_extent_item);
2894            extent_type = btrfs_file_extent_type(leaf, fi);
2895            encoding = btrfs_file_extent_compression(leaf, fi);
2896            encoding |= btrfs_file_extent_encryption(leaf, fi);
2897            encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2898
2899            if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2900                item_end +=
2901                    btrfs_file_extent_num_bytes(leaf, fi);
2902            } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2903                item_end += btrfs_file_extent_inline_len(leaf,
2904                                     fi);
2905            }
2906            item_end--;
2907        }
2908        if (found_type > min_type) {
2909            del_item = 1;
2910        } else {
2911            if (item_end < new_size)
2912                break;
2913            if (found_key.offset >= new_size)
2914                del_item = 1;
2915            else
2916                del_item = 0;
2917        }
2918        found_extent = 0;
2919        /* FIXME, shrink the extent if the ref count is only 1 */
2920        if (found_type != BTRFS_EXTENT_DATA_KEY)
2921            goto delete;
2922
2923        if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2924            u64 num_dec;
2925            extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2926            if (!del_item && !encoding) {
2927                u64 orig_num_bytes =
2928                    btrfs_file_extent_num_bytes(leaf, fi);
2929                extent_num_bytes = new_size -
2930                    found_key.offset + root->sectorsize - 1;
2931                extent_num_bytes = extent_num_bytes &
2932                    ~((u64)root->sectorsize - 1);
2933                btrfs_set_file_extent_num_bytes(leaf, fi,
2934                             extent_num_bytes);
2935                num_dec = (orig_num_bytes -
2936                       extent_num_bytes);
2937                if (root->ref_cows && extent_start != 0)
2938                    inode_sub_bytes(inode, num_dec);
2939                btrfs_mark_buffer_dirty(leaf);
2940            } else {
2941                extent_num_bytes =
2942                    btrfs_file_extent_disk_num_bytes(leaf,
2943                                     fi);
2944                extent_offset = found_key.offset -
2945                    btrfs_file_extent_offset(leaf, fi);
2946
2947                /* FIXME blocksize != 4096 */
2948                num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2949                if (extent_start != 0) {
2950                    found_extent = 1;
2951                    if (root->ref_cows)
2952                        inode_sub_bytes(inode, num_dec);
2953                }
2954            }
2955        } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2956            /*
2957             * we can't truncate inline items that have had
2958             * special encodings
2959             */
2960            if (!del_item &&
2961                btrfs_file_extent_compression(leaf, fi) == 0 &&
2962                btrfs_file_extent_encryption(leaf, fi) == 0 &&
2963                btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2964                u32 size = new_size - found_key.offset;
2965
2966                if (root->ref_cows) {
2967                    inode_sub_bytes(inode, item_end + 1 -
2968                            new_size);
2969                }
2970                size =
2971                    btrfs_file_extent_calc_inline_size(size);
2972                ret = btrfs_truncate_item(trans, root, path,
2973                              size, 1);
2974                BUG_ON(ret);
2975            } else if (root->ref_cows) {
2976                inode_sub_bytes(inode, item_end + 1 -
2977                        found_key.offset);
2978            }
2979        }
2980delete:
2981        if (del_item) {
2982            if (!pending_del_nr) {
2983                /* no pending yet, add ourselves */
2984                pending_del_slot = path->slots[0];
2985                pending_del_nr = 1;
2986            } else if (pending_del_nr &&
2987                   path->slots[0] + 1 == pending_del_slot) {
2988                /* hop on the pending chunk */
2989                pending_del_nr++;
2990                pending_del_slot = path->slots[0];
2991            } else {
2992                BUG();
2993            }
2994        } else {
2995            break;
2996        }
2997        if (found_extent && root->ref_cows) {
2998            btrfs_set_path_blocking(path);
2999            ret = btrfs_free_extent(trans, root, extent_start,
3000                        extent_num_bytes, 0,
3001                        btrfs_header_owner(leaf),
3002                        inode->i_ino, extent_offset);
3003            BUG_ON(ret);
3004        }
3005
3006        if (found_type == BTRFS_INODE_ITEM_KEY)
3007            break;
3008
3009        if (path->slots[0] == 0 ||
3010            path->slots[0] != pending_del_slot) {
3011            if (root->ref_cows) {
3012                err = -EAGAIN;
3013                goto out;
3014            }
3015            if (pending_del_nr) {
3016                ret = btrfs_del_items(trans, root, path,
3017                        pending_del_slot,
3018                        pending_del_nr);
3019                BUG_ON(ret);
3020                pending_del_nr = 0;
3021            }
3022            btrfs_release_path(root, path);
3023            goto search_again;
3024        } else {
3025            path->slots[0]--;
3026        }
3027    }
3028out:
3029    if (pending_del_nr) {
3030        ret = btrfs_del_items(trans, root, path, pending_del_slot,
3031                      pending_del_nr);
3032    }
3033    btrfs_free_path(path);
3034    return err;
3035}
3036
3037/*
3038 * taken from block_truncate_page, but does COW as it zeros out
3039 * any bytes left in the last page in the file.
3040 */
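/*
 * Worked example with 4K pages (numbers are illustrative): truncating down
 * to 6000 bytes arrives here with from == 6000, so index == 1 and
 * offset == 1904; bytes [1904, 4095] of that last page are zeroed and the
 * page is marked delalloc so the zeroed tail is written out through COW.
 */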
3041static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3042{
3043    struct inode *inode = mapping->host;
3044    struct btrfs_root *root = BTRFS_I(inode)->root;
3045    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3046    struct btrfs_ordered_extent *ordered;
3047    struct extent_state *cached_state = NULL;
3048    char *kaddr;
3049    u32 blocksize = root->sectorsize;
3050    pgoff_t index = from >> PAGE_CACHE_SHIFT;
3051    unsigned offset = from & (PAGE_CACHE_SIZE-1);
3052    struct page *page;
3053    int ret = 0;
3054    u64 page_start;
3055    u64 page_end;
3056
3057    if ((offset & (blocksize - 1)) == 0)
3058        goto out;
3059    ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3060    if (ret)
3061        goto out;
3062
3063    ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3064    if (ret)
3065        goto out;
3066
3067    ret = -ENOMEM;
3068again:
3069    page = grab_cache_page(mapping, index);
3070    if (!page) {
3071        btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3072        btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3073        goto out;
3074    }
3075
3076    page_start = page_offset(page);
3077    page_end = page_start + PAGE_CACHE_SIZE - 1;
3078
3079    if (!PageUptodate(page)) {
3080        ret = btrfs_readpage(NULL, page);
3081        lock_page(page);
3082        if (page->mapping != mapping) {
3083            unlock_page(page);
3084            page_cache_release(page);
3085            goto again;
3086        }
3087        if (!PageUptodate(page)) {
3088            ret = -EIO;
3089            goto out_unlock;
3090        }
3091    }
3092    wait_on_page_writeback(page);
3093
3094    lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
3095             GFP_NOFS);
3096    set_page_extent_mapped(page);
3097
3098    ordered = btrfs_lookup_ordered_extent(inode, page_start);
3099    if (ordered) {
3100        unlock_extent_cached(io_tree, page_start, page_end,
3101                     &cached_state, GFP_NOFS);
3102        unlock_page(page);
3103        page_cache_release(page);
3104        btrfs_start_ordered_extent(inode, ordered, 1);
3105        btrfs_put_ordered_extent(ordered);
3106        goto again;
3107    }
3108
3109    clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3110              EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3111              0, 0, &cached_state, GFP_NOFS);
3112
3113    ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3114                    &cached_state);
3115    if (ret) {
3116        unlock_extent_cached(io_tree, page_start, page_end,
3117                     &cached_state, GFP_NOFS);
3118        goto out_unlock;
3119    }
3120
3121    ret = 0;
3122    if (offset != PAGE_CACHE_SIZE) {
3123        kaddr = kmap(page);
3124        memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3125        flush_dcache_page(page);
3126        kunmap(page);
3127    }
3128    ClearPageChecked(page);
3129    set_page_dirty(page);
3130    unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3131                 GFP_NOFS);
3132
3133out_unlock:
3134    if (ret)
3135        btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3136    btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3137    unlock_page(page);
3138    page_cache_release(page);
3139out:
3140    return ret;
3141}
3142
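/*
 * fill the area between the old i_size and the new size with hole
 * file extents, so reads of the newly exposed range return zeros.
 * Regions already covered by preallocated extents are left alone.
 */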
3143int btrfs_cont_expand(struct inode *inode, loff_t size)
3144{
3145    struct btrfs_trans_handle *trans;
3146    struct btrfs_root *root = BTRFS_I(inode)->root;
3147    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3148    struct extent_map *em;
3149    struct extent_state *cached_state = NULL;
3150    u64 mask = root->sectorsize - 1;
3151    u64 hole_start = (inode->i_size + mask) & ~mask;
3152    u64 block_end = (size + mask) & ~mask;
3153    u64 last_byte;
3154    u64 cur_offset;
3155    u64 hole_size;
3156    int err = 0;
3157
3158    if (size <= hole_start)
3159        return 0;
3160
3161    while (1) {
3162        struct btrfs_ordered_extent *ordered;
3163        btrfs_wait_ordered_range(inode, hole_start,
3164                     block_end - hole_start);
3165        lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3166                 &cached_state, GFP_NOFS);
3167        ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3168        if (!ordered)
3169            break;
3170        unlock_extent_cached(io_tree, hole_start, block_end - 1,
3171                     &cached_state, GFP_NOFS);
3172        btrfs_put_ordered_extent(ordered);
3173    }
3174
3175    cur_offset = hole_start;
3176    while (1) {
3177        em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3178                block_end - cur_offset, 0);
3179        BUG_ON(IS_ERR(em) || !em);
3180        last_byte = min(extent_map_end(em), block_end);
3181        last_byte = (last_byte + mask) & ~mask;
3182        if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3183            u64 hint_byte = 0;
3184            hole_size = last_byte - cur_offset;
3185
3186            err = btrfs_reserve_metadata_space(root, 2);
3187            if (err)
3188                break;
3189
3190            trans = btrfs_start_transaction(root, 1);
3191            btrfs_set_trans_block_group(trans, inode);
3192
3193            err = btrfs_drop_extents(trans, inode, cur_offset,
3194                         cur_offset + hole_size,
3195                         &hint_byte, 1);
3196            BUG_ON(err);
3197
3198            err = btrfs_insert_file_extent(trans, root,
3199                    inode->i_ino, cur_offset, 0,
3200                    0, hole_size, 0, hole_size,
3201                    0, 0, 0);
3202            BUG_ON(err);
3203
3204            btrfs_drop_extent_cache(inode, hole_start,
3205                    last_byte - 1, 0);
3206
3207            btrfs_end_transaction(trans, root);
3208            btrfs_unreserve_metadata_space(root, 2);
3209        }
3210        free_extent_map(em);
3211        cur_offset = last_byte;
3212        if (cur_offset >= block_end)
3213            break;
3214    }
3215
3216    unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3217                 GFP_NOFS);
3218    return err;
3219}
3220
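/*
 * handle the ATTR_SIZE part of setattr.  An orphan item is inserted
 * first so that a crash in the middle of the size change can be
 * cleaned up on the next mount.  Growing the file fills the new range
 * with holes via btrfs_cont_expand(); shrinking is handed off to
 * vmtruncate(), which ends up in btrfs_truncate().
 */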
3221static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3222{
3223    struct btrfs_root *root = BTRFS_I(inode)->root;
3224    struct btrfs_trans_handle *trans;
3225    unsigned long nr;
3226    int ret;
3227
3228    if (attr->ia_size == inode->i_size)
3229        return 0;
3230
3231    if (attr->ia_size > inode->i_size) {
3232        unsigned long limit;
3233        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3234        if (attr->ia_size > inode->i_sb->s_maxbytes)
3235            return -EFBIG;
3236        if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3237            send_sig(SIGXFSZ, current, 0);
3238            return -EFBIG;
3239        }
3240    }
3241
3242    ret = btrfs_reserve_metadata_space(root, 1);
3243    if (ret)
3244        return ret;
3245
3246    trans = btrfs_start_transaction(root, 1);
3247    btrfs_set_trans_block_group(trans, inode);
3248
3249    ret = btrfs_orphan_add(trans, inode);
3250    BUG_ON(ret);
3251
3252    nr = trans->blocks_used;
3253    btrfs_end_transaction(trans, root);
3254    btrfs_unreserve_metadata_space(root, 1);
3255    btrfs_btree_balance_dirty(root, nr);
3256
3257    if (attr->ia_size > inode->i_size) {
3258        ret = btrfs_cont_expand(inode, attr->ia_size);
3259        if (ret) {
3260            btrfs_truncate(inode);
3261            return ret;
3262        }
3263
3264        i_size_write(inode, attr->ia_size);
3265        btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3266
3267        trans = btrfs_start_transaction(root, 1);
3268        btrfs_set_trans_block_group(trans, inode);
3269
3270        ret = btrfs_update_inode(trans, root, inode);
3271        BUG_ON(ret);
3272        if (inode->i_nlink > 0) {
3273            ret = btrfs_orphan_del(trans, inode);
3274            BUG_ON(ret);
3275        }
3276        nr = trans->blocks_used;
3277        btrfs_end_transaction(trans, root);
3278        btrfs_btree_balance_dirty(root, nr);
3279        return 0;
3280    }
3281
3282    /*
3283     * We're truncating a file that used to have good data down to
3284     * zero. Make sure it gets into the ordered flush list so that
3285     * any new writes get down to disk quickly.
3286     */
3287    if (attr->ia_size == 0)
3288        BTRFS_I(inode)->ordered_data_close = 1;
3289
3290    /* we don't support swapfiles, so vmtruncate shouldn't fail */
3291    ret = vmtruncate(inode, attr->ia_size);
3292    BUG_ON(ret);
3293
3294    return 0;
3295}
3296
3297static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3298{
3299    struct inode *inode = dentry->d_inode;
3300    int err;
3301
3302    err = inode_change_ok(inode, attr);
3303    if (err)
3304        return err;
3305
3306    if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3307        err = btrfs_setattr_size(inode, attr);
3308        if (err)
3309            return err;
3310    }
3311    attr->ia_valid &= ~ATTR_SIZE;
3312
3313    if (attr->ia_valid)
3314        err = inode_setattr(inode, attr);
3315
3316    if (!err && ((attr->ia_valid & ATTR_MODE)))
3317        err = btrfs_acl_chmod(inode);
3318    return err;
3319}
3320
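/*
 * called when the last reference to an unlinked inode is dropped.
 * Any ordered I/O is waited on, the inode's items are removed in
 * chunks (restarting the transaction on -EAGAIN so a huge truncate
 * does not pin a single transaction), and the orphan record is
 * deleted.
 */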
3321void btrfs_delete_inode(struct inode *inode)
3322{
3323    struct btrfs_trans_handle *trans;
3324    struct btrfs_root *root = BTRFS_I(inode)->root;
3325    unsigned long nr;
3326    int ret;
3327
3328    truncate_inode_pages(&inode->i_data, 0);
3329    if (is_bad_inode(inode)) {
3330        btrfs_orphan_del(NULL, inode);
3331        goto no_delete;
3332    }
3333    btrfs_wait_ordered_range(inode, 0, (u64)-1);
3334
3335    if (root->fs_info->log_root_recovering) {
3336        BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3337        goto no_delete;
3338    }
3339
3340    if (inode->i_nlink > 0) {
3341        BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3342        goto no_delete;
3343    }
3344
3345    btrfs_i_size_write(inode, 0);
3346
3347    while (1) {
3348        trans = btrfs_start_transaction(root, 1);
3349        btrfs_set_trans_block_group(trans, inode);
3350        ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3351
3352        if (ret != -EAGAIN)
3353            break;
3354
3355        nr = trans->blocks_used;
3356        btrfs_end_transaction(trans, root);
3357        trans = NULL;
3358        btrfs_btree_balance_dirty(root, nr);
3359    }
3360
3361    if (ret == 0) {
3362        ret = btrfs_orphan_del(trans, inode);
3363        BUG_ON(ret);
3364    }
3365
3366    nr = trans->blocks_used;
3367    btrfs_end_transaction(trans, root);
3368    btrfs_btree_balance_dirty(root, nr);
3369no_delete:
3370    clear_inode(inode);
3371    return;
3372}
3373
3374/*
3375 * this returns the key found in the dir entry in the location pointer.
3376 * If no dir entries were found, location->objectid is 0.
3377 */
3378static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3379                   struct btrfs_key *location)
3380{
3381    const char *name = dentry->d_name.name;
3382    int namelen = dentry->d_name.len;
3383    struct btrfs_dir_item *di;
3384    struct btrfs_path *path;
3385    struct btrfs_root *root = BTRFS_I(dir)->root;
3386    int ret = 0;
3387
3388    path = btrfs_alloc_path();
3389    BUG_ON(!path);
3390
3391    di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3392                    namelen, 0);
3393    if (IS_ERR(di))
3394        ret = PTR_ERR(di);
3395
3396    if (!di || IS_ERR(di))
3397        goto out_err;
3398
3399    btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3400out:
3401    btrfs_free_path(path);
3402    return ret;
3403out_err:
3404    location->objectid = 0;
3405    goto out;
3406}
3407
3408/*
3409 * when we hit a tree root in a directory, the btrfs part of the inode
3410 * needs to be changed to reflect the root directory of the tree root. This
3411 * is kind of like crossing a mount point.
3412 */
3413static int fixup_tree_root_location(struct btrfs_root *root,
3414                    struct inode *dir,
3415                    struct dentry *dentry,
3416                    struct btrfs_key *location,
3417                    struct btrfs_root **sub_root)
3418{
3419    struct btrfs_path *path;
3420    struct btrfs_root *new_root;
3421    struct btrfs_root_ref *ref;
3422    struct extent_buffer *leaf;
3423    int ret;
3424    int err = 0;
3425
3426    path = btrfs_alloc_path();
3427    if (!path) {
3428        err = -ENOMEM;
3429        goto out;
3430    }
3431
3432    err = -ENOENT;
3433    ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3434                  BTRFS_I(dir)->root->root_key.objectid,
3435                  location->objectid);
3436    if (ret) {
3437        if (ret < 0)
3438            err = ret;
3439        goto out;
3440    }
3441
3442    leaf = path->nodes[0];
3443    ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3444    if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3445        btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3446        goto out;
3447
3448    ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3449                   (unsigned long)(ref + 1),
3450                   dentry->d_name.len);
3451    if (ret)
3452        goto out;
3453
3454    btrfs_release_path(root->fs_info->tree_root, path);
3455
3456    new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3457    if (IS_ERR(new_root)) {
3458        err = PTR_ERR(new_root);
3459        goto out;
3460    }
3461
3462    if (btrfs_root_refs(&new_root->root_item) == 0) {
3463        err = -ENOENT;
3464        goto out;
3465    }
3466
3467    *sub_root = new_root;
3468    location->objectid = btrfs_root_dirid(&new_root->root_item);
3469    location->type = BTRFS_INODE_ITEM_KEY;
3470    location->offset = 0;
3471    err = 0;
3472out:
3473    btrfs_free_path(path);
3474    return err;
3475}
3476
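/*
 * keep track of this inode in the per-root red-black tree of in-memory
 * inodes, keyed by inode number.  btrfs_invalidate_inodes() uses the
 * tree to find everything that must be dropped when a root dies.
 */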
3477static void inode_tree_add(struct inode *inode)
3478{
3479    struct btrfs_root *root = BTRFS_I(inode)->root;
3480    struct btrfs_inode *entry;
3481    struct rb_node **p;
3482    struct rb_node *parent;
3483again:
3484    p = &root->inode_tree.rb_node;
3485    parent = NULL;
3486
3487    if (hlist_unhashed(&inode->i_hash))
3488        return;
3489
3490    spin_lock(&root->inode_lock);
3491    while (*p) {
3492        parent = *p;
3493        entry = rb_entry(parent, struct btrfs_inode, rb_node);
3494
3495        if (inode->i_ino < entry->vfs_inode.i_ino)
3496            p = &parent->rb_left;
3497        else if (inode->i_ino > entry->vfs_inode.i_ino)
3498            p = &parent->rb_right;
3499        else {
3500            WARN_ON(!(entry->vfs_inode.i_state &
3501                  (I_WILL_FREE | I_FREEING | I_CLEAR)));
3502            rb_erase(parent, &root->inode_tree);
3503            RB_CLEAR_NODE(parent);
3504            spin_unlock(&root->inode_lock);
3505            goto again;
3506        }
3507    }
3508    rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3509    rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3510    spin_unlock(&root->inode_lock);
3511}
3512
3513static void inode_tree_del(struct inode *inode)
3514{
3515    struct btrfs_root *root = BTRFS_I(inode)->root;
3516    int empty = 0;
3517
3518    spin_lock(&root->inode_lock);
3519    if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3520        rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3521        RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3522        empty = RB_EMPTY_ROOT(&root->inode_tree);
3523    }
3524    spin_unlock(&root->inode_lock);
3525
3526    if (empty && btrfs_root_refs(&root->root_item) == 0) {
3527        synchronize_srcu(&root->fs_info->subvol_srcu);
3528        spin_lock(&root->inode_lock);
3529        empty = RB_EMPTY_ROOT(&root->inode_tree);
3530        spin_unlock(&root->inode_lock);
3531        if (empty)
3532            btrfs_add_dead_root(root);
3533    }
3534}
3535
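/*
 * evict the cached inodes of a dead root (a deleted subvolume or
 * snapshot): walk its inode tree, prune dentry aliases and drop our
 * references so the inodes can be freed.
 */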
3536int btrfs_invalidate_inodes(struct btrfs_root *root)
3537{
3538    struct rb_node *node;
3539    struct rb_node *prev;
3540    struct btrfs_inode *entry;
3541    struct inode *inode;
3542    u64 objectid = 0;
3543
3544    WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3545
3546    spin_lock(&root->inode_lock);
3547again:
3548    node = root->inode_tree.rb_node;
3549    prev = NULL;
3550    while (node) {
3551        prev = node;
3552        entry = rb_entry(node, struct btrfs_inode, rb_node);
3553
3554        if (objectid < entry->vfs_inode.i_ino)
3555            node = node->rb_left;
3556        else if (objectid > entry->vfs_inode.i_ino)
3557            node = node->rb_right;
3558        else
3559            break;
3560    }
3561    if (!node) {
3562        while (prev) {
3563            entry = rb_entry(prev, struct btrfs_inode, rb_node);
3564            if (objectid <= entry->vfs_inode.i_ino) {
3565                node = prev;
3566                break;
3567            }
3568            prev = rb_next(prev);
3569        }
3570    }
3571    while (node) {
3572        entry = rb_entry(node, struct btrfs_inode, rb_node);
3573        objectid = entry->vfs_inode.i_ino + 1;
3574        inode = igrab(&entry->vfs_inode);
3575        if (inode) {
3576            spin_unlock(&root->inode_lock);
3577            if (atomic_read(&inode->i_count) > 1)
3578                d_prune_aliases(inode);
3579            /*
3580             * btrfs_drop_inode will remove it from
3581             * the inode cache when its usage count
3582             * hits zero.
3583             */
3584            iput(inode);
3585            cond_resched();
3586            spin_lock(&root->inode_lock);
3587            goto again;
3588        }
3589
3590        if (cond_resched_lock(&root->inode_lock))
3591            goto again;
3592
3593        node = rb_next(node);
3594    }
3595    spin_unlock(&root->inode_lock);
3596    return 0;
3597}
3598
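/* reset the btrfs-private portion of a freshly allocated in-memory inode */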
3599static noinline void init_btrfs_i(struct inode *inode)
3600{
3601    struct btrfs_inode *bi = BTRFS_I(inode);
3602
3603    bi->generation = 0;
3604    bi->sequence = 0;
3605    bi->last_trans = 0;
3606    bi->last_sub_trans = 0;
3607    bi->logged_trans = 0;
3608    bi->delalloc_bytes = 0;
3609    bi->reserved_bytes = 0;
3610    bi->disk_i_size = 0;
3611    bi->flags = 0;
3612    bi->index_cnt = (u64)-1;
3613    bi->last_unlink_trans = 0;
3614    bi->ordered_data_close = 0;
3615    bi->force_compress = 0;
3616    extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3617    extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3618                 inode->i_mapping, GFP_NOFS);
3619    extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3620                 inode->i_mapping, GFP_NOFS);
3621    INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3622    INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3623    RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3624    btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3625    mutex_init(&BTRFS_I(inode)->log_mutex);
3626}
3627
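/*
 * callbacks for iget5_locked(): initialize a newly allocated inode, or
 * test whether an existing one matches the (objectid, root) pair we
 * are looking for.
 */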
3628static int btrfs_init_locked_inode(struct inode *inode, void *p)
3629{
3630    struct btrfs_iget_args *args = p;
3631    inode->i_ino = args->ino;
3632    init_btrfs_i(inode);
3633    BTRFS_I(inode)->root = args->root;
3634    btrfs_set_inode_space_info(args->root, inode);
3635    return 0;
3636}
3637
3638static int btrfs_find_actor(struct inode *inode, void *opaque)
3639{
3640    struct btrfs_iget_args *args = opaque;
3641    return args->ino == inode->i_ino &&
3642        args->root == BTRFS_I(inode)->root;
3643}
3644
3645static struct inode *btrfs_iget_locked(struct super_block *s,
3646                       u64 objectid,
3647                       struct btrfs_root *root)
3648{
3649    struct inode *inode;
3650    struct btrfs_iget_args args;
3651    args.ino = objectid;
3652    args.root = root;
3653
3654    inode = iget5_locked(s, objectid, btrfs_find_actor,
3655                 btrfs_init_locked_inode,
3656                 (void *)&args);
3657    return inode;
3658}
3659
3660/* Get an inode object given its location and corresponding root.
3661 * Returns in *new whether the inode was read from disk.
3662 */
3663struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3664             struct btrfs_root *root, int *new)
3665{
3666    struct inode *inode;
3667
3668    inode = btrfs_iget_locked(s, location->objectid, root);
3669    if (!inode)
3670        return ERR_PTR(-ENOMEM);
3671
3672    if (inode->i_state & I_NEW) {
3673        BTRFS_I(inode)->root = root;
3674        memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3675        btrfs_read_locked_inode(inode);
3676
3677        inode_tree_add(inode);
3678        unlock_new_inode(inode);
3679        if (new)
3680            *new = 1;
3681    }
3682
3683    return inode;
3684}
3685
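/*
 * build a dummy in-memory directory inode for a subvolume reference
 * whose root could not be resolved.  The generic simple_dir operations
 * make it behave like an empty directory.
 */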
3686static struct inode *new_simple_dir(struct super_block *s,
3687                    struct btrfs_key *key,
3688                    struct btrfs_root *root)
3689{
3690    struct inode *inode = new_inode(s);
3691
3692    if (!inode)
3693        return ERR_PTR(-ENOMEM);
3694
3695    init_btrfs_i(inode);
3696
3697    BTRFS_I(inode)->root = root;
3698    memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3699    BTRFS_I(inode)->dummy_inode = 1;
3700
3701    inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3702    inode->i_op = &simple_dir_inode_operations;
3703    inode->i_fop = &simple_dir_operations;
3704    inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3705    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3706
3707    return inode;
3708}
3709
3710struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3711{
3712    struct inode *inode;
3713    struct btrfs_root *root = BTRFS_I(dir)->root;
3714    struct btrfs_root *sub_root = root;
3715    struct btrfs_key location;
3716    int index;
3717    int ret;
3718
3719    dentry->d_op = &btrfs_dentry_operations;
3720
3721    if (dentry->d_name.len > BTRFS_NAME_LEN)
3722        return ERR_PTR(-ENAMETOOLONG);
3723
3724    ret = btrfs_inode_by_name(dir, dentry, &location);
3725
3726    if (ret < 0)
3727        return ERR_PTR(ret);
3728
3729    if (location.objectid == 0)
3730        return NULL;
3731
3732    if (location.type == BTRFS_INODE_ITEM_KEY) {
3733        inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3734        return inode;
3735    }
3736
3737    BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3738
3739    index = srcu_read_lock(&root->fs_info->subvol_srcu);
3740    ret = fixup_tree_root_location(root, dir, dentry,
3741                       &location, &sub_root);
3742    if (ret < 0) {
3743        if (ret != -ENOENT)
3744            inode = ERR_PTR(ret);
3745        else
3746            inode = new_simple_dir(dir->i_sb, &location, sub_root);
3747    } else {
3748        inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
3749    }
3750    srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3751
3752    if (root != sub_root) {
3753        down_read(&root->fs_info->cleanup_work_sem);
3754        if (!(inode->i_sb->s_flags & MS_RDONLY))
3755            btrfs_orphan_cleanup(sub_root);
3756        up_read(&root->fs_info->cleanup_work_sem);
3757    }
3758
3759    return inode;
3760}
3761
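/*
 * tell the dcache not to keep caching dentries that belong to a root
 * with no references left (a deleted subvolume or snapshot).
 */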
3762static int btrfs_dentry_delete(struct dentry *dentry)
3763{
3764    struct btrfs_root *root;
3765
3766    if (!dentry->d_inode && !IS_ROOT(dentry))
3767        dentry = dentry->d_parent;
3768
3769    if (dentry->d_inode) {
3770        root = BTRFS_I(dentry->d_inode)->root;
3771        if (btrfs_root_refs(&root->root_item) == 0)
3772            return 1;
3773    }
3774    return 0;
3775}
3776
3777static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3778                   struct nameidata *nd)
3779{
3780    struct inode *inode;
3781
3782    inode = btrfs_lookup_dentry(dir, dentry);
3783    if (IS_ERR(inode))
3784        return ERR_CAST(inode);
3785
3786    return d_splice_alias(inode, dentry);
3787}
3788
3789static unsigned char btrfs_filetype_table[] = {
3790    DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3791};
3792
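/*
 * readdir: "." and ".." are special cased, everything else comes from
 * walking the DIR_INDEX items (DIR_ITEM for the tree root) starting at
 * f_pos, skipping entries that point back at our own snapshot.
 */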
3793static int btrfs_real_readdir(struct file *filp, void *dirent,
3794                  filldir_t filldir)
3795{
3796    struct inode *inode = filp->f_dentry->d_inode;
3797    struct btrfs_root *root = BTRFS_I(inode)->root;
3798    struct btrfs_item *item;
3799    struct btrfs_dir_item *di;
3800    struct btrfs_key key;
3801    struct btrfs_key found_key;
3802    struct btrfs_path *path;
3803    int ret;
3804    u32 nritems;
3805    struct extent_buffer *leaf;
3806    int slot;
3807    int advance;
3808    unsigned char d_type;
3809    int over = 0;
3810    u32 di_cur;
3811    u32 di_total;
3812    u32 di_len;
3813    int key_type = BTRFS_DIR_INDEX_KEY;
3814    char tmp_name[32];
3815    char *name_ptr;
3816    int name_len;
3817
3818    /* FIXME, use a real flag for deciding about the key type */
3819    if (root->fs_info->tree_root == root)
3820        key_type = BTRFS_DIR_ITEM_KEY;
3821
3822    /* special case for "." */
3823    if (filp->f_pos == 0) {
3824        over = filldir(dirent, ".", 1,
3825                   1, inode->i_ino,
3826                   DT_DIR);
3827        if (over)
3828            return 0;
3829        filp->f_pos = 1;
3830    }
3831    /* special case for .., just use the back ref */
3832    if (filp->f_pos == 1) {
3833        u64 pino = parent_ino(filp->f_path.dentry);
3834        over = filldir(dirent, "..", 2,
3835                   2, pino, DT_DIR);
3836        if (over)
3837            return 0;
3838        filp->f_pos = 2;
3839    }
3840    path = btrfs_alloc_path();
3841    path->reada = 2;
3842
3843    btrfs_set_key_type(&key, key_type);
3844    key.offset = filp->f_pos;
3845    key.objectid = inode->i_ino;
3846
3847    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3848    if (ret < 0)
3849        goto err;
3850    advance = 0;
3851
3852    while (1) {
3853        leaf = path->nodes[0];
3854        nritems = btrfs_header_nritems(leaf);
3855        slot = path->slots[0];
3856        if (advance || slot >= nritems) {
3857            if (slot >= nritems - 1) {
3858                ret = btrfs_next_leaf(root, path);
3859                if (ret)
3860                    break;
3861                leaf = path->nodes[0];
3862                nritems = btrfs_header_nritems(leaf);
3863                slot = path->slots[0];
3864            } else {
3865                slot++;
3866                path->slots[0]++;
3867            }
3868        }
3869
3870        advance = 1;
3871        item = btrfs_item_nr(leaf, slot);
3872        btrfs_item_key_to_cpu(leaf, &found_key, slot);
3873
3874        if (found_key.objectid != key.objectid)
3875            break;
3876        if (btrfs_key_type(&found_key) != key_type)
3877            break;
3878        if (found_key.offset < filp->f_pos)
3879            continue;
3880
3881        filp->f_pos = found_key.offset;
3882
3883        di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3884        di_cur = 0;
3885        di_total = btrfs_item_size(leaf, item);
3886
3887        while (di_cur < di_total) {
3888            struct btrfs_key location;
3889
3890            name_len = btrfs_dir_name_len(leaf, di);
3891            if (name_len <= sizeof(tmp_name)) {
3892                name_ptr = tmp_name;
3893            } else {
3894                name_ptr = kmalloc(name_len, GFP_NOFS);
3895                if (!name_ptr) {
3896                    ret = -ENOMEM;
3897                    goto err;
3898                }
3899            }
3900            read_extent_buffer(leaf, name_ptr,
3901                       (unsigned long)(di + 1), name_len);
3902
3903            d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3904            btrfs_dir_item_key_to_cpu(leaf, di, &location);
3905
3906            /* is this a reference to our own snapshot? If so
3907             * skip it
3908             */
3909            if (location.type == BTRFS_ROOT_ITEM_KEY &&
3910                location.objectid == root->root_key.objectid) {
3911                over = 0;
3912                goto skip;
3913            }
3914            over = filldir(dirent, name_ptr, name_len,
3915                       found_key.offset, location.objectid,
3916                       d_type);
3917
3918skip:
3919            if (name_ptr != tmp_name)
3920                kfree(name_ptr);
3921
3922            if (over)
3923                goto nopos;
3924            di_len = btrfs_dir_name_len(leaf, di) +
3925                 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3926            di_cur += di_len;
3927            di = (struct btrfs_dir_item *)((char *)di + di_len);
3928        }
3929    }
3930
3931    /* Reached end of directory/root. Bump pos past the last item. */
3932    if (key_type == BTRFS_DIR_INDEX_KEY)
3933        /*
3934         * 32-bit glibc will use getdents64, but then parse the offset
3935         * with strtol, so INT_MAX is the largest position we can return.
3936         */
3937        filp->f_pos = 0x7fffffff;
3938    else
3939        filp->f_pos++;
3940nopos:
3941    ret = 0;
3942err:
3943    btrfs_free_path(path);
3944    return ret;
3945}
3946
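/*
 * the inode item is kept up to date by the running transaction, so the
 * only useful work for a data-integrity writeback is to commit that
 * transaction.
 */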
3947int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
3948{
3949    struct btrfs_root *root = BTRFS_I(inode)->root;
3950    struct btrfs_trans_handle *trans;
3951    int ret = 0;
3952
3953    if (root->fs_info->btree_inode == inode)
3954        return 0;
3955
3956    if (wbc->sync_mode == WB_SYNC_ALL) {
3957        trans = btrfs_join_transaction(root, 1);
3958        btrfs_set_trans_block_group(trans, inode);
3959        ret = btrfs_commit_transaction(trans, root);
3960    }
3961    return ret;
3962}
3963
3964/*
3965 * This is somewhat expensive, updating the tree every time the
3966 * inode changes. But, it is most likely to find the inode in cache.
3967 * FIXME: needs more benchmarking; there is no reason other than performance
3968 * to keep or drop this code.
3969 */
3970void btrfs_dirty_inode(struct inode *inode)
3971{
3972    struct btrfs_root *root = BTRFS_I(inode)->root;
3973    struct btrfs_trans_handle *trans;
3974
3975    trans = btrfs_join_transaction(root, 1);
3976    btrfs_set_trans_block_group(trans, inode);
3977    btrfs_update_inode(trans, root, inode);
3978    btrfs_end_transaction(trans, root);
3979}
3980
3981/*
3982 * find the highest existing sequence number in a directory
3983 * and then set the in-memory index_cnt variable to the
3984 * first free sequence number
3985 */
3986static int btrfs_set_inode_index_count(struct inode *inode)
3987{
3988    struct btrfs_root *root = BTRFS_I(inode)->root;
3989    struct btrfs_key key, found_key;
3990    struct btrfs_path *path;
3991    struct extent_buffer *leaf;
3992    int ret;
3993
3994    key.objectid = inode->i_ino;
3995    btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3996    key.offset = (u64)-1;
3997
3998    path = btrfs_alloc_path();
3999    if (!path)
4000        return -ENOMEM;
4001
4002    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4003    if (ret < 0)
4004        goto out;
4005    /* FIXME: we should be able to handle this */
4006    if (ret == 0)
4007        goto out;
4008    ret = 0;
4009
4010    /*
4011     * MAGIC NUMBER EXPLANATION:
4012     * since we search a directory based on f_pos, we have to start at 2:
4013     * '.' and '..' have f_pos of 0 and 1 respectively, so everything
4014     * else has to start at 2
4015     */
4016    if (path->slots[0] == 0) {
4017        BTRFS_I(inode)->index_cnt = 2;
4018        goto out;
4019    }
4020
4021    path->slots[0]--;
4022
4023    leaf = path->nodes[0];
4024    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4025
4026    if (found_key.objectid != inode->i_ino ||
4027        btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4028        BTRFS_I(inode)->index_cnt = 2;
4029        goto out;
4030    }
4031
4032    BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4033out:
4034    btrfs_free_path(path);
4035    return ret;
4036}
4037
4038/*
4039 * helper to find a free sequence number in a given directory. The current
4040 * code is very simple; later versions will do smarter things in the btree.
4041 */
4042int btrfs_set_inode_index(struct inode *dir, u64 *index)
4043{
4044    int ret = 0;
4045
4046    if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4047        ret = btrfs_set_inode_index_count(dir);
4048        if (ret)
4049            return ret;
4050    }
4051
4052    *index = BTRFS_I(dir)->index_cnt;
4053    BTRFS_I(dir)->index_cnt++;
4054
4055    return ret;
4056}
4057
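/*
 * create a new in-memory and on-disk inode: reserve a directory index,
 * insert the inode item and its back reference in one leaf, set the
 * ownership (honoring the parent's setgid bit) and inherit flags from
 * the parent directory.
 */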
4058static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4059                     struct btrfs_root *root,
4060                     struct inode *dir,
4061                     const char *name, int name_len,
4062                     u64 ref_objectid, u64 objectid,
4063                     u64 alloc_hint, int mode, u64 *index)
4064{
4065    struct inode *inode;
4066    struct btrfs_inode_item *inode_item;
4067    struct btrfs_key *location;
4068    struct btrfs_path *path;
4069    struct btrfs_inode_ref *ref;
4070    struct btrfs_key key[2];
4071    u32 sizes[2];
4072    unsigned long ptr;
4073    int ret;
4074    int owner;
4075
4076    path = btrfs_alloc_path();
4077    BUG_ON(!path);
4078
4079    inode = new_inode(root->fs_info->sb);
4080    if (!inode)
4081        return ERR_PTR(-ENOMEM);
4082
4083    if (dir) {
4084        ret = btrfs_set_inode_index(dir, index);
4085        if (ret) {
4086            iput(inode);
4087            return ERR_PTR(ret);
4088        }
4089    }
4090    /*
4091     * index_cnt is ignored for everything but a dir;
4092     * btrfs_set_inode_index_count has an explanation for the magic
4093     * number
4094     */
4095    init_btrfs_i(inode);
4096    BTRFS_I(inode)->index_cnt = 2;
4097    BTRFS_I(inode)->root = root;
4098    BTRFS_I(inode)->generation = trans->transid;
4099    btrfs_set_inode_space_info(root, inode);
4100
4101    if (mode & S_IFDIR)
4102        owner = 0;
4103    else
4104        owner = 1;
4105    BTRFS_I(inode)->block_group =
4106            btrfs_find_block_group(root, 0, alloc_hint, owner);
4107
4108    key[0].objectid = objectid;
4109    btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4110    key[0].offset = 0;
4111
4112    key[1].objectid = objectid;
4113    btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4114    key[1].offset = ref_objectid;
4115
4116    sizes[0] = sizeof(struct btrfs_inode_item);
4117    sizes[1] = name_len + sizeof(*ref);
4118
4119    path->leave_spinning = 1;
4120    ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4121    if (ret != 0)
4122        goto fail;
4123
4124    inode->i_uid = current_fsuid();
4125
4126    if (dir && (dir->i_mode & S_ISGID)) {
4127        inode->i_gid = dir->i_gid;
4128        if (S_ISDIR(mode))
4129            mode |= S_ISGID;
4130    } else
4131        inode->i_gid = current_fsgid();
4132
4133    inode->i_mode = mode;
4134    inode->i_ino = objectid;
4135    inode_set_bytes(inode, 0);
4136    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4137    inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4138                  struct btrfs_inode_item);
4139    fill_inode_item(trans, path->nodes[0], inode_item, inode);
4140
4141    ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4142                 struct btrfs_inode_ref);
4143    btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4144    btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4145    ptr = (unsigned long)(ref + 1);
4146    write_extent_buffer(path->nodes[0], name, ptr, name_len);
4147
4148    btrfs_mark_buffer_dirty(path->nodes[0]);
4149    btrfs_free_path(path);
4150
4151    location = &BTRFS_I(inode)->location;
4152    location->objectid = objectid;
4153    location->offset = 0;
4154    btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4155
4156    btrfs_inherit_iflags(inode, dir);
4157
4158    if ((mode & S_IFREG)) {
4159        if (btrfs_test_opt(root, NODATASUM))
4160            BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4161        if (btrfs_test_opt(root, NODATACOW))
4162            BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4163    }
4164
4165    insert_inode_hash(inode);
4166    inode_tree_add(inode);
4167    return inode;
4168fail:
4169    if (dir)
4170        BTRFS_I(dir)->index_cnt--;
4171    btrfs_free_path(path);
4172    iput(inode);
4173    return ERR_PTR(ret);
4174}
4175
4176static inline u8 btrfs_inode_type(struct inode *inode)
4177{
4178    return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4179}
4180
4181/*
4182 * utility function to add 'inode' into 'parent_inode' with
4183 * a given name and a given sequence number.
4184 * if 'add_backref' is true, also insert a backref from the
4185 * inode to the parent directory.
4186 */
4187int btrfs_add_link(struct btrfs_trans_handle *trans,
4188           struct inode *parent_inode, struct inode *inode,
4189           const char *name, int name_len, int add_backref, u64 index)
4190{
4191    int ret = 0;
4192    struct btrfs_key key;
4193    struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4194
4195    if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4196        memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4197    } else {
4198        key.objectid = inode->i_ino;
4199        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4200        key.offset = 0;
4201    }
4202
4203    if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4204        ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4205                     key.objectid, root->root_key.objectid,
4206                     parent_inode->i_ino,
4207                     index, name, name_len);
4208    } else if (add_backref) {
4209        ret = btrfs_insert_inode_ref(trans, root,
4210                         name, name_len, inode->i_ino,
4211                         parent_inode->i_ino, index);
4212    }
4213
4214    if (ret == 0) {
4215        ret = btrfs_insert_dir_item(trans, root, name, name_len,
4216                        parent_inode->i_ino, &key,
4217                        btrfs_inode_type(inode), index);
4218        BUG_ON(ret);
4219
4220        btrfs_i_size_write(parent_inode, parent_inode->i_size +
4221                   name_len * 2);
4222        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4223        ret = btrfs_update_inode(trans, root, parent_inode);
4224    }
4225    return ret;
4226}
4227
4228static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4229                struct dentry *dentry, struct inode *inode,
4230                int backref, u64 index)
4231{
4232    int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4233                 inode, dentry->d_name.name,
4234                 dentry->d_name.len, backref, index);
4235    if (!err) {
4236        d_instantiate(dentry, inode);
4237        return 0;
4238    }
4239    if (err > 0)
4240        err = -EEXIST;
4241    return err;
4242}
4243
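/* create a special file (device node, fifo or socket) in a directory */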
4244static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4245            int mode, dev_t rdev)
4246{
4247    struct btrfs_trans_handle *trans;
4248    struct btrfs_root *root = BTRFS_I(dir)->root;
4249    struct inode *inode = NULL;
4250    int err;
4251    int drop_inode = 0;
4252    u64 objectid;
4253    unsigned long nr = 0;
4254    u64 index = 0;
4255
4256    if (!new_valid_dev(rdev))
4257        return -EINVAL;
4258
4259    /*
4260     * 2 for inode item and ref
4261     * 2 for dir items
4262     * 1 for xattr if selinux is on
4263     */
4264    err = btrfs_reserve_metadata_space(root, 5);
4265    if (err)
4266        return err;
4267
4268    trans = btrfs_start_transaction(root, 1);
4269    if (!trans)
4270        goto fail;
4271    btrfs_set_trans_block_group(trans, dir);
4272
4273    err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4274    if (err) {
4275        err = -ENOSPC;
4276        goto out_unlock;
4277    }
4278
4279    inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4280                dentry->d_name.len,
4281                dentry->d_parent->d_inode->i_ino, objectid,
4282                BTRFS_I(dir)->block_group, mode, &index);
4283    err = PTR_ERR(inode);
4284    if (IS_ERR(inode))
4285        goto out_unlock;
4286
4287    err = btrfs_init_inode_security(trans, inode, dir);
4288    if (err) {
4289        drop_inode = 1;
4290        goto out_unlock;
4291    }
4292
4293    btrfs_set_trans_block_group(trans, inode);
4294    err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4295    if (err)
4296        drop_inode = 1;
4297    else {
4298        inode->i_op = &btrfs_special_inode_operations;
4299        init_special_inode(inode, inode->i_mode, rdev);
4300        btrfs_update_inode(trans, root, inode);
4301    }
4302    btrfs_update_inode_block_group(trans, inode);
4303    btrfs_update_inode_block_group(trans, dir);
4304out_unlock:
4305    nr = trans->blocks_used;
4306    btrfs_end_transaction_throttle(trans, root);
4307fail:
4308    btrfs_unreserve_metadata_space(root, 5);
4309    if (drop_inode) {
4310        inode_dec_link_count(inode);
4311        iput(inode);
4312    }
4313    btrfs_btree_balance_dirty(root, nr);
4314    return err;
4315}
4316
4317static int btrfs_create(struct inode *dir, struct dentry *dentry,
4318            int mode, struct nameidata *nd)
4319{
4320    struct btrfs_trans_handle *trans;
4321    struct btrfs_root *root = BTRFS_I(dir)->root;
4322    struct inode *inode = NULL;
4323    int err;
4324    int drop_inode = 0;
4325    unsigned long nr = 0;
4326    u64 objectid;
4327    u64 index = 0;
4328
4329    /*
4330     * 2 for inode item and ref
4331     * 2 for dir items
4332     * 1 for xattr if selinux is on
4333     */
4334    err = btrfs_reserve_metadata_space(root, 5);
4335    if (err)
4336        return err;
4337
4338    trans = btrfs_start_transaction(root, 1);
4339    if (!trans)
4340        goto fail;
4341    btrfs_set_trans_block_group(trans, dir);
4342
4343    err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4344    if (err) {
4345        err = -ENOSPC;
4346        goto out_unlock;
4347    }
4348
4349    inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4350                dentry->d_name.len,
4351                dentry->d_parent->d_inode->i_ino,
4352                objectid, BTRFS_I(dir)->block_group, mode,
4353                &index);
4354    err = PTR_ERR(inode);
4355    if (IS_ERR(inode))
4356        goto out_unlock;
4357
4358    err = btrfs_init_inode_security(trans, inode, dir);
4359    if (err) {
4360        drop_inode = 1;
4361        goto out_unlock;
4362    }
4363
4364    btrfs_set_trans_block_group(trans, inode);
4365    err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4366    if (err)
4367        drop_inode = 1;
4368    else {
4369        inode->i_mapping->a_ops = &btrfs_aops;
4370        inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4371        inode->i_fop = &btrfs_file_operations;
4372        inode->i_op = &btrfs_file_inode_operations;
4373        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4374    }
4375    btrfs_update_inode_block_group(trans, inode);
4376    btrfs_update_inode_block_group(trans, dir);
4377out_unlock:
4378    nr = trans->blocks_used;
4379    btrfs_end_transaction_throttle(trans, root);
4380fail:
4381    btrfs_unreserve_metadata_space(root, 5);
4382    if (drop_inode) {
4383        inode_dec_link_count(inode);
4384        iput(inode);
4385    }
4386    btrfs_btree_balance_dirty(root, nr);
4387    return err;
4388}
4389
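/*
 * hard link: bump the link count, add the new directory entry and
 * back reference, and record the new name for the tree log.
 */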
4390static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4391              struct dentry *dentry)
4392{
4393    struct btrfs_trans_handle *trans;
4394    struct btrfs_root *root = BTRFS_I(dir)->root;
4395    struct inode *inode = old_dentry->d_inode;
4396    u64 index;
4397    unsigned long nr = 0;
4398    int err;
4399    int drop_inode = 0;
4400
4401    if (inode->i_nlink == 0)
4402        return -ENOENT;
4403
4404    /* do not allow hard links across subvolumes of the same device */
4405    if (root->objectid != BTRFS_I(inode)->root->objectid)
4406        return -EPERM;
4407
4408    /*
4409     * 1 item for inode ref
4410     * 2 items for dir items
4411     */
4412    err = btrfs_reserve_metadata_space(root, 3);
4413    if (err)
4414        return err;
4415
4416    btrfs_inc_nlink(inode);
4417
4418    err = btrfs_set_inode_index(dir, &index);
4419    if (err)
4420        goto fail;
4421
4422    trans = btrfs_start_transaction(root, 1);
4423
4424    btrfs_set_trans_block_group(trans, dir);
4425    atomic_inc(&inode->i_count);
4426
4427    err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4428
4429    if (err) {
4430        drop_inode = 1;
4431    } else {
4432        btrfs_update_inode_block_group(trans, dir);
4433        err = btrfs_update_inode(trans, root, inode);
4434        BUG_ON(err);
4435        btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4436    }
4437
4438    nr = trans->blocks_used;
4439    btrfs_end_transaction_throttle(trans, root);
4440fail:
4441    btrfs_unreserve_metadata_space(root, 3);
4442    if (drop_inode) {
4443        inode_dec_link_count(inode);
4444        iput(inode);
4445    }
4446    btrfs_btree_balance_dirty(root, nr);
4447    return err;
4448}
4449
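/*
 * create a directory: the new inode starts out empty (i_size 0) and is
 * linked into the parent with a directory entry and back reference.
 */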
4450static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4451{
4452    struct inode *inode = NULL;
4453    struct btrfs_trans_handle *trans;
4454    struct btrfs_root *root = BTRFS_I(dir)->root;
4455    int err = 0;
4456    int drop_on_err = 0;
4457    u64 objectid = 0;
4458    u64 index = 0;
4459    unsigned long nr = 1;
4460
4461    /*
4462     * 2 items for inode and ref
4463     * 2 items for dir items
4464     * 1 for xattr if selinux is on
4465     */
4466    err = btrfs_reserve_metadata_space(root, 5);
4467    if (err)
4468        return err;
4469
4470    trans = btrfs_start_transaction(root, 1);
4471    if (!trans) {
4472        err = -ENOMEM;
4473        goto out_unlock;
4474    }
4475    btrfs_set_trans_block_group(trans, dir);
4476
4477    err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4478    if (err) {
4479        err = -ENOSPC;
4480        goto out_fail;
4481    }
4482
4483    inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4484                dentry->d_name.len,
4485                dentry->d_parent->d_inode->i_ino, objectid,
4486                BTRFS_I(dir)->block_group, S_IFDIR | mode,
4487                &index);
4488    if (IS_ERR(inode)) {
4489        err = PTR_ERR(inode);
4490        goto out_fail;
4491    }
4492
4493    drop_on_err = 1;
4494
4495    err = btrfs_init_inode_security(trans, inode, dir);
4496    if (err)
4497        goto out_fail;
4498
4499    inode->i_op = &btrfs_dir_inode_operations;
4500    inode->i_fop = &btrfs_dir_file_operations;
4501    btrfs_set_trans_block_group(trans, inode);
4502
4503    btrfs_i_size_write(inode, 0);
4504    err = btrfs_update_inode(trans, root, inode);
4505    if (err)
4506        goto out_fail;
4507
4508    err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4509                 inode, dentry->d_name.name,
4510                 dentry->d_name.len, 0, index);
4511    if (err)
4512        goto out_fail;
4513
4514    d_instantiate(dentry, inode);
4515    drop_on_err = 0;
4516    btrfs_update_inode_block_group(trans, inode);
4517    btrfs_update_inode_block_group(trans, dir);
4518
4519out_fail:
4520    nr = trans->blocks_used;
4521    btrfs_end_transaction_throttle(trans, root);
4522
4523out_unlock:
4524    btrfs_unreserve_metadata_space(root, 5);
4525    if (drop_on_err)
4526        iput(inode);
4527    btrfs_btree_balance_dirty(root, nr);
4528    return err;
4529}
4530
4531/* helper for btrfs_get_extent. Given an existing extent in the tree,
4532 * and an extent that you want to insert, deal with overlap and insert
4533 * the new extent into the tree.
4534 */
4535static int merge_extent_mapping(struct extent_map_tree *em_tree,
4536                struct extent_map *existing,
4537                struct extent_map *em,
4538                u64 map_start, u64 map_len)
4539{
4540    u64 start_diff;
4541
4542    BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4543    start_diff = map_start - em->start;
4544    em->start = map_start;
4545    em->len = map_len;
4546    if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4547        !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4548        em->block_start += start_diff;
4549        em->block_len -= start_diff;
4550    }
4551    return add_extent_mapping(em_tree, em);
4552}
4553
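/*
 * copy a zlib-compressed inline extent into the given page, zero
 * filling any part of the page that could not be filled from the
 * inline data.
 */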
4554static noinline int uncompress_inline(struct btrfs_path *path,
4555                      struct inode *inode, struct page *page,
4556                      size_t pg_offset, u64 extent_offset,
4557                      struct btrfs_file_extent_item *item)
4558{
4559    int ret;
4560    struct extent_buffer *leaf = path->nodes[0];
4561    char *tmp;
4562    size_t max_size;
4563    unsigned long inline_size;
4564    unsigned long ptr;
4565
4566    WARN_ON(pg_offset != 0);
4567    max_size = btrfs_file_extent_ram_bytes(leaf, item);
4568    inline_size = btrfs_file_extent_inline_item_len(leaf,
4569                    btrfs_item_nr(leaf, path->slots[0]));
4570    tmp = kmalloc(inline_size, GFP_NOFS);
4571    ptr = btrfs_file_extent_inline_start(item);
4572
4573    read_extent_buffer(leaf, tmp, ptr, inline_size);
4574
4575    max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4576    ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4577                    inline_size, max_size);
4578    if (ret) {
4579        char *kaddr = kmap_atomic(page, KM_USER0);
4580        unsigned long copy_size = min_t(u64,
4581                  PAGE_CACHE_SIZE - pg_offset,
4582                  max_size - extent_offset);
4583        memset(kaddr + pg_offset, 0, copy_size);
4584        kunmap_atomic(kaddr, KM_USER0);
4585    }
4586    kfree(tmp);
4587    return 0;
4588}
4589
4590/*
4591 * a bit scary, this does extent mapping from logical file offset to the disk.
4592 * the ugly parts come from merging extents from the disk with the in-ram
4593 * representation. This gets more complex because of the data=ordered code,
4594 * where the in-ram extents might be locked pending data=ordered completion.
4595 *
4596 * This also copies inline extents directly into the page.
4597 */
4598
4599struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4600                    size_t pg_offset, u64 start, u64 len,
4601                    int create)
4602{
4603    int ret;
4604    int err = 0;
4605    u64 bytenr;
4606    u64 extent_start = 0;
4607    u64 extent_end = 0;
4608    u64 objectid = inode->i_ino;
4609    u32 found_type;
4610    struct btrfs_path *path = NULL;
4611    struct btrfs_root *root = BTRFS_I(inode)->root;
4612    struct btrfs_file_extent_item *item;
4613    struct extent_buffer *leaf;
4614    struct btrfs_key found_key;
4615    struct extent_map *em = NULL;
4616    struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4617    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4618    struct btrfs_trans_handle *trans = NULL;
4619    int compressed;
4620
4621again:
4622    read_lock(&em_tree->lock);
4623    em = lookup_extent_mapping(em_tree, start, len);
4624    if (em)
4625        em->bdev = root->fs_info->fs_devices->latest_bdev;
4626    read_unlock(&em_tree->lock);
4627
4628    if (em) {
4629        if (em->start > start || em->start + em->len <= start)
4630            free_extent_map(em);
4631        else if (em->block_start == EXTENT_MAP_INLINE && page)
4632            free_extent_map(em);
4633        else
4634            goto out;
4635    }
4636    em = alloc_extent_map(GFP_NOFS);
4637    if (!em) {
4638        err = -ENOMEM;
4639        goto out;
4640    }
4641    em->bdev = root->fs_info->fs_devices->latest_bdev;
4642    em->start = EXTENT_MAP_HOLE;
4643    em->orig_start = EXTENT_MAP_HOLE;
4644    em->len = (u64)-1;
4645    em->block_len = (u64)-1;
4646
4647    if (!path) {
4648        path = btrfs_alloc_path();
4649        BUG_ON(!path);
4650    }
4651
4652    ret = btrfs_lookup_file_extent(trans, root, path,
4653                       objectid, start, trans != NULL);
4654    if (ret < 0) {
4655        err = ret;
4656        goto out;
4657    }
4658
4659    if (ret != 0) {
4660        if (path->slots[0] == 0)
4661            goto not_found;
4662        path->slots[0]--;
4663    }
4664
4665    leaf = path->nodes[0];
4666    item = btrfs_item_ptr(leaf, path->slots[0],
4667                  struct btrfs_file_extent_item);
4668    /* are we inside the extent that was found? */
4669    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4670    found_type = btrfs_key_type(&found_key);
4671    if (found_key.objectid != objectid ||
4672        found_type != BTRFS_EXTENT_DATA_KEY) {
4673        goto not_found;
4674    }
4675
4676    found_type = btrfs_file_extent_type(leaf, item);
4677    extent_start = found_key.offset;
4678    compressed = btrfs_file_extent_compression(leaf, item);
4679    if (found_type == BTRFS_FILE_EXTENT_REG ||
4680        found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4681        extent_end = extent_start +
4682               btrfs_file_extent_num_bytes(leaf, item);
4683    } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4684        size_t size;
4685        size = btrfs_file_extent_inline_len(leaf, item);
4686        extent_end = (extent_start + size + root->sectorsize - 1) &
4687            ~((u64)root->sectorsize - 1);
4688    }
4689
4690    if (start >= extent_end) {
4691        path->slots[0]++;
4692        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4693            ret = btrfs_next_leaf(root, path);
4694            if (ret < 0) {
4695                err = ret;
4696                goto out;
4697            }
4698            if (ret > 0)
4699                goto not_found;
4700            leaf = path->nodes[0];
4701        }
4702        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4703        if (found_key.objectid != objectid ||
4704            found_key.type != BTRFS_EXTENT_DATA_KEY)
4705            goto not_found;
4706        if (start + len <= found_key.offset)
4707            goto not_found;
4708        em->start = start;
4709        em->len = found_key.offset - start;
4710        goto not_found_em;
4711    }
4712
4713    if (found_type == BTRFS_FILE_EXTENT_REG ||
4714        found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4715        em->start = extent_start;
4716        em->len = extent_end - extent_start;
4717        em->orig_start = extent_start -
4718                 btrfs_file_extent_offset(leaf, item);
4719        bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4720        if (bytenr == 0) {
4721            em->block_start = EXTENT_MAP_HOLE;
4722            goto insert;
4723        }
4724        if (compressed) {
4725            set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4726            em->block_start = bytenr;
4727            em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4728                                     item);
4729        } else {
4730            bytenr += btrfs_file_extent_offset(leaf, item);
4731            em->block_start = bytenr;
4732            em->block_len = em->len;
4733            if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4734                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4735        }
4736        goto insert;
4737    } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4738        unsigned long ptr;
4739        char *map;
4740        size_t size;
4741        size_t extent_offset;
4742        size_t copy_size;
4743
4744        em->block_start = EXTENT_MAP_INLINE;
4745        if (!page || create) {
4746            em->start = extent_start;
4747            em->len = extent_end - extent_start;
4748            goto out;
4749        }
4750
4751        size = btrfs_file_extent_inline_len(leaf, item);
4752        extent_offset = page_offset(page) + pg_offset - extent_start;
4753        copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4754                size - extent_offset);
4755        em->start = extent_start + extent_offset;
4756        em->len = (copy_size + root->sectorsize - 1) &
4757            ~((u64)root->sectorsize - 1);
4758        em->orig_start = EXTENT_MAP_INLINE;
4759        if (compressed)
4760            set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4761        ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4762        if (create == 0 && !PageUptodate(page)) {
4763            if (btrfs_file_extent_compression(leaf, item) ==
4764                BTRFS_COMPRESS_ZLIB) {
4765                ret = uncompress_inline(path, inode, page,
4766                            pg_offset,
4767                            extent_offset, item);
4768                BUG_ON(ret);
4769            } else {
4770                map = kmap(page);
4771                read_extent_buffer(leaf, map + pg_offset, ptr,
4772                           copy_size);
4773                if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4774                    memset(map + pg_offset + copy_size, 0,
4775                           PAGE_CACHE_SIZE - pg_offset -
4776                           copy_size);
4777                }
4778                kunmap(page);
4779            }
4780            flush_dcache_page(page);
4781        } else if (create && PageUptodate(page)) {
4782            if (!trans) {
4783                kunmap(page);
4784                free_extent_map(em);
4785                em = NULL;
4786                btrfs_release_path(root, path);
4787                trans = btrfs_join_transaction(root, 1);
4788                goto again;
4789            }
4790            map = kmap(page);
4791            write_extent_buffer(leaf, map + pg_offset, ptr,
4792                        copy_size);
4793            kunmap(page);
4794            btrfs_mark_buffer_dirty(leaf);
4795        }
4796        set_extent_uptodate(io_tree, em->start,
4797                    extent_map_end(em) - 1, GFP_NOFS);
4798        goto insert;
4799    } else {
4800        printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4801        WARN_ON(1);
4802    }
4803not_found:
4804    em->start = start;
4805    em->len = len;
4806not_found_em:
4807    em->block_start = EXTENT_MAP_HOLE;
4808    set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4809insert:
4810    btrfs_release_path(root, path);
4811    if (em->start > start || extent_map_end(em) <= start) {
4812        printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4813               "[%llu %llu]\n", (unsigned long long)em->start,
4814               (unsigned long long)em->len,
4815               (unsigned long long)start,
4816               (unsigned long long)len);
4817        err = -EIO;
4818        goto out;
4819    }
4820
4821    err = 0;
4822    write_lock(&em_tree->lock);
4823    ret = add_extent_mapping(em_tree, em);
4824    /* it is possible that someone inserted the extent into the tree
4825     * while we had the lock dropped. It is also possible that
4826     * an overlapping map exists in the tree
4827     */
4828    if (ret == -EEXIST) {
4829        struct extent_map *existing;
4830
4831        ret = 0;
4832
4833        existing = lookup_extent_mapping(em_tree, start, len);
4834        if (existing && (existing->start > start ||
4835            existing->start + existing->len <= start)) {
4836            free_extent_map(existing);
4837            existing = NULL;
4838        }
4839        if (!existing) {
4840            existing = lookup_extent_mapping(em_tree, em->start,
4841                             em->len);
4842            if (existing) {
4843                err = merge_extent_mapping(em_tree, existing,
4844                               em, start,
4845                               root->sectorsize);
4846                free_extent_map(existing);
4847                if (err) {
4848                    free_extent_map(em);
4849                    em = NULL;
4850                }
4851            } else {
4852                err = -EIO;
4853                free_extent_map(em);
4854                em = NULL;
4855            }
4856        } else {
4857            free_extent_map(em);
4858            em = existing;
4859            err = 0;
4860        }
4861    }
4862    write_unlock(&em_tree->lock);
4863out:
4864    if (path)
4865        btrfs_free_path(path);
4866    if (trans) {
4867        ret = btrfs_end_transaction(trans, root);
4868        if (!err)
4869            err = ret;
4870    }
4871    if (err) {
4872        free_extent_map(em);
4873        return ERR_PTR(err);
4874    }
4875    return em;
4876}
4877
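/*
 * Direct I/O is not supported by this address space yet.  Providing the
 * method keeps O_DIRECT opens from failing outright (the VFS only checks
 * that ->direct_IO exists); the stub then rejects the actual I/O.
 */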
4878static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4879            const struct iovec *iov, loff_t offset,
4880            unsigned long nr_segs)
4881{
4882    return -EINVAL;
4883}
4884
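/*
 * FIEMAP ioctl support: hand the work off to the generic extent_fiemap
 * helper, using btrfs_get_extent to resolve the extent mappings.
 */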
4885static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4886        __u64 start, __u64 len)
4887{
4888    return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4889}
4890
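/*
 * The address space operations below are thin wrappers that route page
 * reads and writes through the inode's extent_io_tree, with btrfs_get_extent
 * supplying the block mapping.
 */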
4891int btrfs_readpage(struct file *file, struct page *page)
4892{
4893    struct extent_io_tree *tree;
4894    tree = &BTRFS_I(page->mapping->host)->io_tree;
4895    return extent_read_full_page(tree, page, btrfs_get_extent);
4896}
4897
4898static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4899{
4900    struct extent_io_tree *tree;
4901
4902
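    /*
     * called from memory reclaim: writing the page out here risks
     * deadlock and deep recursion, so just redirty it and let regular
     * writeback deal with it later
     */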
4903    if (current->flags & PF_MEMALLOC) {
4904        redirty_page_for_writepage(wbc, page);
4905        unlock_page(page);
4906        return 0;
4907    }
4908    tree = &BTRFS_I(page->mapping->host)->io_tree;
4909    return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4910}
4911
4912int btrfs_writepages(struct address_space *mapping,
4913             struct writeback_control *wbc)
4914{
4915    struct extent_io_tree *tree;
4916
4917    tree = &BTRFS_I(mapping->host)->io_tree;
4918    return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4919}
4920
4921static int
4922btrfs_readpages(struct file *file, struct address_space *mapping,
4923        struct list_head *pages, unsigned nr_pages)
4924{
4925    struct extent_io_tree *tree;
4926    tree = &BTRFS_I(mapping->host)->io_tree;
4927    return extent_readpages(tree, mapping, pages, nr_pages,
4928                btrfs_get_extent);
4929}
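
/*
 * try to drop the extent state and mapping attached to a page so the VM
 * can free it; returns 1 and clears the private bit only when nothing
 * still pins the page.
 */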
4930static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4931{
4932    struct extent_io_tree *tree;
4933    struct extent_map_tree *map;
4934    int ret;
4935
4936    tree = &BTRFS_I(page->mapping->host)->io_tree;
4937    map = &BTRFS_I(page->mapping->host)->extent_tree;
4938    ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4939    if (ret == 1) {
4940        ClearPagePrivate(page);
4941        set_page_private(page, 0);
4942        page_cache_release(page);
4943    }
4944    return ret;
4945}
4946
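/*
 * never release pages that are still dirty or under writeback.  The gfp
 * mask is restricted to GFP_NOFS so nothing done while releasing can
 * recurse back into the filesystem.
 */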
4947static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4948{
4949    if (PageWriteback(page) || PageDirty(page))
4950        return 0;
4951    return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4952}
4953
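/*
 * a page is being removed from the page cache: tear down the extent state,
 * settle any ordered extent accounting and drop the private reference we
 * attached to the page.
 */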
4954static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4955{
4956    struct extent_io_tree *tree;
4957    struct btrfs_ordered_extent *ordered;
4958    struct extent_state *cached_state = NULL;
4959    u64 page_start = page_offset(page);
4960    u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4961
4962
4963    /*
4964     * we have the page locked, so new writeback can't start,
4965     * and the dirty bit won't be cleared while we are here.
4966     *
4967     * Wait for IO on this page so that we can safely clear
4968     * the PagePrivate2 bit and do ordered accounting
4969     */
4970    wait_on_page_writeback(page);
4971
4972    tree = &BTRFS_I(page->mapping->host)->io_tree;
4973    if (offset) {
4974        btrfs_releasepage(page, GFP_NOFS);
4975        return;
4976    }
4977    lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
4978             GFP_NOFS);
4979    ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4980                       page_offset(page));
4981    if (ordered) {
4982        /*
4983         * IO on this page will never be started, so we need
4984         * to account for any ordered extents now
4985         */
4986        clear_extent_bit(tree, page_start, page_end,
4987                 EXTENT_DIRTY | EXTENT_DELALLOC |
4988                 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
4989                 &cached_state, GFP_NOFS);
4990        /*
4991         * whoever cleared the private bit is responsible
4992         * for the finish_ordered_io
4993         */
4994        if (TestClearPagePrivate2(page)) {
4995            btrfs_finish_ordered_io(page->mapping->host,
4996                        page_start, page_end);
4997        }
4998        btrfs_put_ordered_extent(ordered);
4999        cached_state = NULL;
5000        lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
5001                 GFP_NOFS);
5002    }
5003    clear_extent_bit(tree, page_start, page_end,
5004         EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
5005         EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
5006    __btrfs_releasepage(page, GFP_NOFS);
5007
5008    ClearPageChecked(page);
5009    if (PagePrivate(page)) {
5010        ClearPagePrivate(page);
5011        set_page_private(page, 0);
5012        page_cache_release(page);
5013    }
5014}
5015
5016/*
5017 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
5018 * called from a page fault handler when a page is first dirtied. Hence we must
5019 * be careful to check for EOF conditions here. We set the page up correctly
5020 * for a written page which means we get ENOSPC checking when writing into
5021 * holes and correct delalloc and unwritten extent mapping on filesystems that
5022 * support these features.
5023 *
5024 * We are not allowed to take the i_mutex here so we have to play games to
5025 * protect against truncate races as the page could now be beyond EOF. Because
5026 * vmtruncate() writes the inode size before removing pages, once we have the
5027 * page lock we can determine safely if the page is beyond EOF. If it is not
5028 * beyond EOF, then the page is guaranteed safe against truncation until we
5029 * unlock the page.
5030 */
5031int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5032{
5033    struct page *page = vmf->page;
5034    struct inode *inode = fdentry(vma->vm_file)->d_inode;
5035    struct btrfs_root *root = BTRFS_I(inode)->root;
5036    struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5037    struct btrfs_ordered_extent *ordered;
5038    struct extent_state *cached_state = NULL;
5039    char *kaddr;
5040    unsigned long zero_start;
5041    loff_t size;
5042    int ret;
5043    u64 page_start;
5044    u64 page_end;
5045
5046    ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
5047    if (ret) {
5048        if (ret == -ENOMEM)
5049            ret = VM_FAULT_OOM;
5050        else /* -ENOSPC, -EIO, etc */
5051            ret = VM_FAULT_SIGBUS;
5052        goto out;
5053    }
5054
5055    ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
5056    if (ret) {
5057        btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5058        ret = VM_FAULT_SIGBUS;
5059        goto out;
5060    }
5061
5062    ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
5063again:
5064    lock_page(page);
5065    size = i_size_read(inode);
5066    page_start = page_offset(page);
5067    page_end = page_start + PAGE_CACHE_SIZE - 1;
5068
5069    if ((page->mapping != inode->i_mapping) ||
5070        (page_start >= size)) {
5071        btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5072        /* page got truncated out from underneath us */
5073        goto out_unlock;
5074    }
5075    wait_on_page_writeback(page);
5076
5077    lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
5078             GFP_NOFS);
5079    set_page_extent_mapped(page);
5080
5081    /*
5082     * we can't set the delalloc bits if there are pending ordered
5083     * extents. Drop our locks and wait for them to finish
5084     */
5085    ordered = btrfs_lookup_ordered_extent(inode, page_start);
5086    if (ordered) {
5087        unlock_extent_cached(io_tree, page_start, page_end,
5088                     &cached_state, GFP_NOFS);
5089        unlock_page(page);
5090        btrfs_start_ordered_extent(inode, ordered, 1);
5091        btrfs_put_ordered_extent(ordered);
5092        goto again;
5093    }
5094
5095    /*
5096     * XXX - page_mkwrite gets called every time the page is dirtied, even
5097     * if it was already dirty, so for space accounting reasons we need to
5098     * clear any delalloc bits for the range we are fixing to save. There
5099     * is probably a better way to do this, but for now keep consistent with
5100     * prepare_pages in the normal write path.
5101     */
5102    clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
5103              EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5104              0, 0, &cached_state, GFP_NOFS);
5105
5106    ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
5107                    &cached_state);
5108    if (ret) {
5109        unlock_extent_cached(io_tree, page_start, page_end,
5110                     &cached_state, GFP_NOFS);
5111        ret = VM_FAULT_SIGBUS;
5112        btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5113        goto out_unlock;
5114    }
5115    ret = 0;
5116
5117    /* page is wholly or partially inside EOF */
5118    if (page_start + PAGE_CACHE_SIZE > size)
5119        zero_start = size & ~PAGE_CACHE_MASK;
5120    else
5121        zero_start = PAGE_CACHE_SIZE;
5122
5123    if (zero_start != PAGE_CACHE_SIZE) {
5124        kaddr = kmap(page);
5125        memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
5126        flush_dcache_page(page);
5127        kunmap(page);
5128    }
5129    ClearPageChecked(page);
5130    set_page_dirty(page);
5131    SetPageUptodate(page);
5132
5133    BTRFS_I(inode)->last_trans = root->fs_info->generation;
5134    BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5135
5136    unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
5137
5138out_unlock:
5139    btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5140    if (!ret)
5141        return VM_FAULT_LOCKED;
5142    unlock_page(page);
5143out:
5144    return ret;
5145}
5146
5147static void btrfs_truncate(struct inode *inode)
5148{
5149    struct btrfs_root *root = BTRFS_I(inode)->root;
5150    int ret;
5151    struct btrfs_trans_handle *trans;
5152    unsigned long nr;
5153    u64 mask = root->sectorsize - 1;
5154
5155    if (!S_ISREG(inode->i_mode)) {
5156        WARN_ON(1);
5157        return;
5158    }
5159
5160    ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5161    if (ret)
5162        return;
5163
5164    btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5165    btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
5166
5167    trans = btrfs_start_transaction(root, 1);
5168    btrfs_set_trans_block_group(trans, inode);
5169
5170    /*
5171     * setattr is responsible for setting the ordered_data_close flag,
5172     * but that is only tested during the last file release. That
5173     * could happen well after the next commit, leaving a great big
5174     * window where new writes may get lost if someone chooses to write
5175     * to this file after truncating to zero
5176     * to this file after truncating to zero.
5177     *
5178     * The inode doesn't have any dirty data here, and so if we commit,
5179     * this is a noop. If someone immediately starts writing to the inode
5180     * transaction, and the commit will find this file on the ordered
5181     * data list with good things to send down.
5182     *
5183     * This is a best effort solution, there is still a window where
5184     * using truncate to replace the contents of the file will
5185     * end up with a zero length file after a crash.
5186     */
5187    if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5188        btrfs_add_ordered_operation(trans, root, inode);
5189
5190    while (1) {
5191        ret = btrfs_truncate_inode_items(trans, root, inode,
5192                         inode->i_size,
5193                         BTRFS_EXTENT_DATA_KEY);
5194        if (ret != -EAGAIN)
5195            break;
5196
5197        ret = btrfs_update_inode(trans, root, inode);
5198        BUG_ON(ret);
5199
5200        nr = trans->blocks_used;
5201        btrfs_end_transaction(trans, root);
5202        btrfs_btree_balance_dirty(root, nr);
5203
5204        trans = btrfs_start_transaction(root, 1);
5205        btrfs_set_trans_block_group(trans, inode);
5206    }
5207
5208    if (ret == 0 && inode->i_nlink > 0) {
5209        ret = btrfs_orphan_del(trans, inode);
5210        BUG_ON(ret);
5211    }
5212
5213    ret = btrfs_update_inode(trans, root, inode);
5214    BUG_ON(ret);
5215
5216    nr = trans->blocks_used;
5217    ret = btrfs_end_transaction_throttle(trans, root);
5218    BUG_ON(ret);
5219    btrfs_btree_balance_dirty(root, nr);
5220}
5221
5222/*
5223 * create a new subvolume directory/inode (helper for the ioctl).
5224 */
5225int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5226                 struct btrfs_root *new_root,
5227                 u64 new_dirid, u64 alloc_hint)
5228{
5229    struct inode *inode;
5230    int err;
5231    u64 index = 0;
5232
5233    inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5234                new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5235    if (IS_ERR(inode))
5236        return PTR_ERR(inode);
5237    inode->i_op = &btrfs_dir_inode_operations;
5238    inode->i_fop = &btrfs_dir_file_operations;
5239
5240    inode->i_nlink = 1;
5241    btrfs_i_size_write(inode, 0);
5242
5243    err = btrfs_update_inode(trans, new_root, inode);
5244    BUG_ON(err);
5245
5246    iput(inode);
5247    return 0;
5248}
5249
5250/* helper function for file defrag and space balancing. This
5251 * forces readahead on a given range of bytes in an inode
5252 */
5253unsigned long btrfs_force_ra(struct address_space *mapping,
5254                  struct file_ra_state *ra, struct file *file,
5255                  pgoff_t offset, pgoff_t last_index)
5256{
5257    pgoff_t req_size = last_index - offset + 1;
5258
5259    page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5260    return offset + req_size;
5261}
5262
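/*
 * allocate an in-memory btrfs inode from the slab cache and initialize the
 * btrfs specific fields; the VFS fills in the generic vfs_inode parts.
 */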
5263struct inode *btrfs_alloc_inode(struct super_block *sb)
5264{
5265    struct btrfs_inode *ei;
5266
5267    ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5268    if (!ei)
5269        return NULL;
5270    ei->last_trans = 0;
5271    ei->last_sub_trans = 0;
5272    ei->logged_trans = 0;
5273    ei->outstanding_extents = 0;
5274    ei->reserved_extents = 0;
5275    ei->root = NULL;
5276    spin_lock_init(&ei->accounting_lock);
5277    btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5278    INIT_LIST_HEAD(&ei->i_orphan);
5279    INIT_LIST_HEAD(&ei->ordered_operations);
5280    return &ei->vfs_inode;
5281}
5282
5283void btrfs_destroy_inode(struct inode *inode)
5284{
5285    struct btrfs_ordered_extent *ordered;
5286    struct btrfs_root *root = BTRFS_I(inode)->root;
5287
5288    WARN_ON(!list_empty(&inode->i_dentry));
5289    WARN_ON(inode->i_data.nrpages);
5290
5291    /*
5292     * This can happen when we create an inode, but somebody else also
5293     * created the same inode and we need to destroy the one we already
5294     * created.
5295     */
5296    if (!root)
5297        goto free;
5298
5299    /*
5300     * Make sure we're properly removed from the ordered operation
5301     * lists.
5302     */
5303    smp_mb();
5304    if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5305        spin_lock(&root->fs_info->ordered_extent_lock);
5306        list_del_init(&BTRFS_I(inode)->ordered_operations);
5307        spin_unlock(&root->fs_info->ordered_extent_lock);
5308    }
5309
5310    spin_lock(&root->list_lock);
5311    if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5312        printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
5313               inode->i_ino);
5314        list_del_init(&BTRFS_I(inode)->i_orphan);
5315    }
5316    spin_unlock(&root->list_lock);
5317
5318    while (1) {
5319        ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5320        if (!ordered)
5321            break;
5322        else {
5323            printk(KERN_ERR "btrfs found ordered "
5324                   "extent %llu %llu on inode cleanup\n",
5325                   (unsigned long long)ordered->file_offset,
5326                   (unsigned long long)ordered->len);
5327            btrfs_remove_ordered_extent(inode, ordered);
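            /*
             * two puts: one for the reference the lookup above took,
             * one for the reference the ordered tree held until the
             * extent was removed from it
             */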
5328            btrfs_put_ordered_extent(ordered);
5329            btrfs_put_ordered_extent(ordered);
5330        }
5331    }
5332    inode_tree_del(inode);
5333    btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5334free:
5335    kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5336}
5337
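/*
 * decide what to do when the last reference to an inode is dropped.  If the
 * root it belongs to has no references left (the subvolume is going away),
 * delete the inode immediately instead of keeping it cached.
 */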
5338void btrfs_drop_inode(struct inode *inode)
5339{
5340    struct btrfs_root *root = BTRFS_I(inode)->root;
5341    if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5342        generic_delete_inode(inode);
5343    else
5344        generic_drop_inode(inode);
5345}
5346
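/*
 * slab constructor: runs once when an object is first set up in the cache,
 * so the embedded VFS inode is initialized a single time rather than on
 * every allocation.
 */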
5347static void init_once(void *foo)
5348{
5349    struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5350
5351    inode_init_once(&ei->vfs_inode);
5352}
5353
5354void btrfs_destroy_cachep(void)
5355{
5356    if (btrfs_inode_cachep)
5357        kmem_cache_destroy(btrfs_inode_cachep);
5358    if (btrfs_trans_handle_cachep)
5359        kmem_cache_destroy(btrfs_trans_handle_cachep);
5360    if (btrfs_transaction_cachep)
5361        kmem_cache_destroy(btrfs_transaction_cachep);
5362    if (btrfs_path_cachep)
5363        kmem_cache_destroy(btrfs_path_cachep);
5364}
5365
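/*
 * create the slab caches used for btrfs inodes, transaction handles,
 * transactions and paths.  If any of them fails, tear down whatever was
 * already created and return -ENOMEM.
 */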
5366int btrfs_init_cachep(void)
5367{
5368    btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5369            sizeof(struct btrfs_inode), 0,
5370            SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5371    if (!btrfs_inode_cachep)
5372        goto fail;
5373
5374    btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5375            sizeof(struct btrfs_trans_handle), 0,
5376            SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5377    if (!btrfs_trans_handle_cachep)
5378        goto fail;
5379
5380    btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5381            sizeof(struct btrfs_transaction), 0,
5382            SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5383    if (!btrfs_transaction_cachep)
5384        goto fail;
5385
5386    btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5387            sizeof(struct btrfs_path), 0,
5388            SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5389    if (!btrfs_path_cachep)
5390        goto fail;
5391
5392    return 0;
5393fail:
5394    btrfs_destroy_cachep();
5395    return -ENOMEM;
5396}
5397
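/*
 * report the subvolume's anonymous device as st_dev and include delalloc
 * bytes that have not hit disk yet, so st_blocks reflects pending writes.
 */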
5398static int btrfs_getattr(struct vfsmount *mnt,
5399             struct dentry *dentry, struct kstat *stat)
5400{
5401    struct inode *inode = dentry->d_inode;
5402    generic_fillattr(inode, stat);
5403    stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5404    stat->blksize = PAGE_CACHE_SIZE;
5405    stat->blocks = (inode_get_bytes(inode) +
5406            BTRFS_I(inode)->delalloc_bytes) >> 9;
5407    return 0;
5408}
5409
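/*
 * rename within a single transaction: the old name is unlinked and the new
 * one inserted, with the tree log pinned across the window so a crash leaves
 * the inode reachable under at least one of the names.  Cross-subvolume
 * renames are only allowed for subvolume links.
 */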
5410static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5411               struct inode *new_dir, struct dentry *new_dentry)
5412{
5413    struct btrfs_trans_handle *trans;
5414    struct btrfs_root *root = BTRFS_I(old_dir)->root;
5415    struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5416    struct inode *new_inode = new_dentry->d_inode;
5417    struct inode *old_inode = old_dentry->d_inode;
5418    struct timespec ctime = CURRENT_TIME;
5419    u64 index = 0;
5420    u64 root_objectid;
5421    int ret;
5422
5423    if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5424        return -EPERM;
5425
5426    /* we only allow renaming a subvolume link between subvolumes */
5427    if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5428        return -EXDEV;
5429
5430    if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5431        (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5432        return -ENOTEMPTY;
5433
5434    if (S_ISDIR(old_inode->i_mode) && new_inode &&
5435        new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5436        return -ENOTEMPTY;
5437
5438    /*
5439     * We want to reserve the absolute worst case amount of items. So if
5440     * both inodes are subvols and we need to unlink them then that would
5441     * require 4 item modifications, but if they are both normal inodes it
5442     * would require 5 item modifications, so we'll assume they're normal
5443     * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5444     * should cover the worst case number of items we'll modify.
5445     */
5446    ret = btrfs_reserve_metadata_space(root, 11);
5447    if (ret)
5448        return ret;
5449
5450    /*
5451     * we're using rename to replace one file with another, and the
5452     * replacement file is large. Start IO on it now so
5453     * we don't add too much work to the end of the transaction
5454     */
5455    if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5456        old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5457        filemap_flush(old_inode->i_mapping);
5458
5459    /* close the racy window with snapshot create/destroy ioctl */
5460    if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5461        down_read(&root->fs_info->subvol_sem);
5462
5463    trans = btrfs_start_transaction(root, 1);
5464    btrfs_set_trans_block_group(trans, new_dir);
5465
5466    if (dest != root)
5467        btrfs_record_root_in_trans(trans, dest);
5468
5469    ret = btrfs_set_inode_index(new_dir, &index);
5470    if (ret)
5471        goto out_fail;
5472
5473    if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5474        /* force full log commit if subvolume involved. */
5475        root->fs_info->last_trans_log_full_commit = trans->transid;
5476    } else {
5477        ret = btrfs_insert_inode_ref(trans, dest,
5478                         new_dentry->d_name.name,
5479                         new_dentry->d_name.len,
5480                         old_inode->i_ino,
5481                         new_dir->i_ino, index);
5482        if (ret)
5483            goto out_fail;
5484        /*
5485         * this is an ugly little race, but the rename is required
5486         * to make sure that if we crash, the inode is either at the
5487         * old name or the new one. pinning the log transaction lets
5488         * us make sure we don't allow a log commit to come in after
5489         * we unlink the name but before we add the new name back in.
5490         */
5491        btrfs_pin_log_trans(root);
5492    }
5493    /*
5494     * make sure the inode gets flushed if it is replacing
5495     * something.
5496     */
5497    if (new_inode && new_inode->i_size &&
5498        old_inode && S_ISREG(old_inode->i_mode)) {
5499        btrfs_add_ordered_operation(trans, root, old_inode);
5500    }
5501
5502    old_dir->i_ctime = old_dir->i_mtime = ctime;
5503    new_dir->i_ctime = new_dir->i_mtime = ctime;
5504    old_inode->i_ctime = ctime;
5505
5506    if (old_dentry->d_parent != new_dentry->d_parent)
5507        btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5508
5509    if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5510        root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5511        ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5512                    old_dentry->d_name.name,
5513                    old_dentry->d_name.len);
5514    } else {
5515        btrfs_inc_nlink(old_dentry->d_inode);
5516        ret = btrfs_unlink_inode(trans, root, old_dir,
5517                     old_dentry->d_inode,
5518                     old_dentry->d_name.name,
5519                     old_dentry->d_name.len);
5520    }
5521    BUG_ON(ret);
5522
5523    if (new_inode) {
5524        new_inode->i_ctime = CURRENT_TIME;
5525        if (unlikely(new_inode->i_ino ==
5526                 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5527            root_objectid = BTRFS_I(new_inode)->location.objectid;
5528            ret = btrfs_unlink_subvol(trans, dest, new_dir,
5529                        root_objectid,
5530                        new_dentry->d_name.name,
5531                        new_dentry->d_name.len);
5532            BUG_ON(new_inode->i_nlink == 0);
5533        } else {
5534            ret = btrfs_unlink_inode(trans, dest, new_dir,
5535                         new_dentry->d_inode,
5536                         new_dentry->d_name.name,
5537                         new_dentry->d_name.len);
5538        }
5539        BUG_ON(ret);
5540        if (new_inode->i_nlink == 0) {
5541            ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5542            BUG_ON(ret);
5543        }
5544    }
5545
5546    ret = btrfs_add_link(trans, new_dir, old_inode,
5547                 new_dentry->d_name.name,
5548                 new_dentry->d_name.len, 0, index);
5549    BUG_ON(ret);
5550
5551    if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5552        btrfs_log_new_name(trans, old_inode, old_dir,
5553                   new_dentry->d_parent);
5554        btrfs_end_log_trans(root);
5555    }
5556out_fail:
5557    btrfs_end_transaction_throttle(trans, root);
5558
5559    if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5560        up_read(&root->fs_info->subvol_sem);
5561
5562    btrfs_unreserve_metadata_space(root, 11);
5563    return ret;
5564}
5565
5566/*
5567 * some fairly slow code that needs optimization. This walks the list
5568 * of all the inodes with pending delalloc and forces them to disk.
5569 */
5570int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
5571{
5572    struct list_head *head = &root->fs_info->delalloc_inodes;
5573    struct btrfs_inode *binode;
5574    struct inode *inode;
5575
5576    if (root->fs_info->sb->s_flags & MS_RDONLY)
5577        return -EROFS;
5578
5579    spin_lock(&root->fs_info->delalloc_lock);
5580    while (!list_empty(head)) {
5581        binode = list_entry(head->next, struct btrfs_inode,
5582                    delalloc_inodes);
5583        inode = igrab(&binode->vfs_inode);
5584        if (!inode)
5585            list_del_init(&binode->delalloc_inodes);
5586        spin_unlock(&root->fs_info->delalloc_lock);
5587        if (inode) {
5588            filemap_flush(inode->i_mapping);
5589            if (delay_iput)
5590                btrfs_add_delayed_iput(inode);
5591            else
5592                iput(inode);
5593