Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 18 Mar 2010 23:50:55 +0000 (16:50 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 18 Mar 2010 23:50:55 +0000 (16:50 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (30 commits)
  Btrfs: fix the inode ref searches done by btrfs_search_path_in_tree
  Btrfs: allow treeid==0 in the inode lookup ioctl
  Btrfs: return keys for large items to the search ioctl
  Btrfs: fix key checks and advance in the search ioctl
  Btrfs: buffer results in the space_info ioctl
  Btrfs: use __u64 types in ioctl.h
  Btrfs: fix search_ioctl key advance
  Btrfs: fix gfp flags masking in the compression code
  Btrfs: don't look at bio flags after submit_bio
  btrfs: using btrfs_stack_device_id() get devid
  btrfs: use memparse
  Btrfs: add a "df" ioctl for btrfs
  Btrfs: cache the extent state everywhere we possibly can V2
  Btrfs: cache ordered extent when completing io
  Btrfs: cache extent state in find_delalloc_range
  Btrfs: change the ordered tree to use a spinlock instead of a mutex
  Btrfs: finish read pages in the order they are submitted
  btrfs: fix btrfs_mkdir goto for no free objectids
  Btrfs: flush data on snapshot creation
  Btrfs: make df be a little bit more understandable
  ...

fs/btrfs/ctree.h
fs/btrfs/file.c
fs/btrfs/inode.c
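
The common thread in this merge is caching the extent_state found when a byte
range is locked: call sites that used lock_extent()/unlock_extent() now use
lock_extent_bits()/unlock_extent_cached() and pass the same cached_state
pointer through btrfs_set_extent_delalloc() and clear_extent_bit(), so
follow-up operations on a locked range can reuse the state record instead of
searching the io tree again. Below is a minimal sketch of that calling
convention; the types and helpers are hypothetical userspace stand-ins, not
the kernel's extent-io API.

/*
 * Minimal, hypothetical sketch of the cached-extent-state convention
 * this merge introduces.  Everything here is a userspace stand-in.
 */
#include <stdio.h>
#include <stdlib.h>

struct extent_state {                       /* stand-in for extent_state */
	unsigned long long start, end;
};

/* lock_extent_bits() analogue: records the locked range and leaves a
 * reference in *cached so later calls on the same range can skip the
 * tree search. */
static void lock_range(unsigned long long start, unsigned long long end,
		       struct extent_state **cached)
{
	struct extent_state *st = calloc(1, sizeof(*st));

	if (!st)
		abort();
	st->start = start;
	st->end = end;
	*cached = st;
}

/* set_extent_delalloc()/clear_extent_bit() analogue: consults the cached
 * state first and only "searches" again on a miss. */
static void mark_range(unsigned long long start, unsigned long long end,
		       struct extent_state **cached)
{
	if (*cached && (*cached)->start <= start && (*cached)->end >= end)
		printf("reused cached state for [%llu, %llu]\n", start, end);
	else
		printf("cache miss: would walk the tree again\n");
}

/* unlock_extent_cached() analogue: drops the cached reference. */
static void unlock_range_cached(struct extent_state **cached)
{
	free(*cached);
	*cached = NULL;
}

int main(void)
{
	struct extent_state *cached_state = NULL;  /* callers start from NULL */

	lock_range(0, 4095, &cached_state);
	mark_range(0, 4095, &cached_state);        /* hit: no second search */
	unlock_range_cached(&cached_state);
	return 0;
}

The payoff is that a write path which locks a range and then sets or clears
bits on that same range does one tree search instead of several, which is why
nearly every hunk below grows a cached_state local initialised to NULL.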

diff --combined fs/btrfs/ctree.h
index 8b5cfdd4bfc1b7b91e6e94ae94e5ee9f2dbaa875,11115847d875c0fa556d88e53e17bd8c9c277d87..0af2e3868573467b60d3d8990e9d82720bc26df3
@@@ -373,11 -373,13 +373,13 @@@ struct btrfs_super_block 
   * ones specified below then we will fail to mount
   */
  #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF  (1ULL << 0)
+ #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (2ULL << 0)
  
  #define BTRFS_FEATURE_COMPAT_SUPP             0ULL
  #define BTRFS_FEATURE_COMPAT_RO_SUPP          0ULL
  #define BTRFS_FEATURE_INCOMPAT_SUPP           \
-       BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF
+       (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
+        BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)
  
  /*
   * A leaf is full of items. offset and size tell us where to find
@@@ -1182,7 -1184,6 +1184,6 @@@ struct btrfs_root 
  #define BTRFS_INODE_NOATIME           (1 << 9)
  #define BTRFS_INODE_DIRSYNC           (1 << 10)
  
  /* some macros to generate set/get funcs for the struct fields.  This
   * assumes there is a lefoo_to_cpu for every type, so lets make a simple
   * one for u8:
@@@ -1842,7 -1843,7 +1843,7 @@@ BTRFS_SETGET_STACK_FUNCS(super_num_devi
  BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
                         compat_flags, 64);
  BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
-                        compat_flags, 64);
+                        compat_ro_flags, 64);
  BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
                         incompat_flags, 64);
  BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
@@@ -2310,7 -2311,8 +2311,8 @@@ int btrfs_truncate_inode_items(struct b
                               u32 min_type);
  
  int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
- int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
+ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             struct extent_state **cached_state);
  int btrfs_writepages(struct address_space *mapping,
                     struct writeback_control *wbc);
  int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
@@@ -2326,7 -2328,7 +2328,7 @@@ int btrfs_page_mkwrite(struct vm_area_s
  int btrfs_readpage(struct file *file, struct page *page);
  void btrfs_delete_inode(struct inode *inode);
  void btrfs_put_inode(struct inode *inode);
 -int btrfs_write_inode(struct inode *inode, int wait);
 +int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
  void btrfs_dirty_inode(struct inode *inode);
  struct inode *btrfs_alloc_inode(struct super_block *sb);
  void btrfs_destroy_inode(struct inode *inode);
@@@ -2335,7 -2337,7 +2337,7 @@@ int btrfs_init_cachep(void)
  void btrfs_destroy_cachep(void);
  long btrfs_ioctl_trans_end(struct file *file);
  struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-                        struct btrfs_root *root);
+                        struct btrfs_root *root, int *was_new);
  int btrfs_commit_write(struct file *file, struct page *page,
                       unsigned from, unsigned to);
  struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
@@@ -2386,7 -2388,6 +2388,6 @@@ void btrfs_sysfs_del_super(struct btrfs
  ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
  
  /* super.c */
- u64 btrfs_parse_size(char *str);
  int btrfs_parse_options(struct btrfs_root *root, char *options);
  int btrfs_sync_fs(struct super_block *sb, int wait);
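
The ctree.h hunk above also fixes a copy-and-paste bug in the accessor
generators: BTRFS_SETGET_STACK_FUNCS() takes the struct member as an
argument, so passing compat_flags twice made the super_compat_ro_flags
helpers read and write compat_flags instead of compat_ro_flags. A minimal
sketch of such a generator macro, with simplified hypothetical names and no
endian handling, shows why a duplicated argument silently aliases the wrong
field.

/* Hypothetical sketch in the spirit of BTRFS_SETGET_STACK_FUNCS
 * (simplified: no endian conversion, fixed-width getters/setters). */
#include <stdint.h>
#include <stdio.h>

struct super_block_stack {
	uint64_t compat_flags;
	uint64_t compat_ro_flags;
	uint64_t incompat_flags;
};

#define SETGET_STACK_FUNCS(name, type, member)                        \
static inline uint64_t get_##name(const type *s)                      \
{                                                                     \
	return s->member;                                             \
}                                                                     \
static inline void set_##name(type *s, uint64_t val)                  \
{                                                                     \
	s->member = val;                                              \
}

SETGET_STACK_FUNCS(super_compat_flags, struct super_block_stack, compat_flags)
SETGET_STACK_FUNCS(super_compat_ro_flags, struct super_block_stack, compat_ro_flags)
SETGET_STACK_FUNCS(super_incompat_flags, struct super_block_stack, incompat_flags)

int main(void)
{
	struct super_block_stack sb = { 0 };

	/* With the duplicated "compat_flags" argument removed, the _ro_
	 * accessors touch their own field instead of aliasing compat_flags. */
	set_super_compat_ro_flags(&sb, 1);
	printf("compat=%llu ro=%llu\n",
	       (unsigned long long)get_super_compat_flags(&sb),
	       (unsigned long long)get_super_compat_ro_flags(&sb));
	return 0;
}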
  
diff --combined fs/btrfs/file.c
index 6ed434ac037faac00117e5d633478bd7a2e0805b,d146dde7efb6f73a0b41e554e41367c3fea69d2c..ee3323c7fc1c8fe2554ab222d2e424b6938ad335
@@@ -123,7 -123,8 +123,8 @@@ static noinline int dirty_and_release_p
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
  
        end_of_last_block = start_pos + num_bytes - 1;
-       err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
+       err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+                                       NULL);
        if (err)
                return err;
  
@@@ -753,6 -754,7 +754,7 @@@ static noinline int prepare_pages(struc
                         loff_t pos, unsigned long first_index,
                         unsigned long last_index, size_t write_bytes)
  {
+       struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
@@@ -781,16 -783,18 +783,18 @@@ again
        }
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
-               lock_extent(&BTRFS_I(inode)->io_tree,
-                           start_pos, last_pos - 1, GFP_NOFS);
+               lock_extent_bits(&BTRFS_I(inode)->io_tree,
+                                start_pos, last_pos - 1, 0, &cached_state,
+                                GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(&BTRFS_I(inode)->io_tree,
-                                     start_pos, last_pos - 1, GFP_NOFS);
+                       unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                                            start_pos, last_pos - 1,
+                                            &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
  
-               clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
+               clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
-                                 EXTENT_DO_ACCOUNTING,
+                                 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
                                  GFP_NOFS);
-               unlock_extent(&BTRFS_I(inode)->io_tree,
-                             start_pos, last_pos - 1, GFP_NOFS);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                                    start_pos, last_pos - 1, &cached_state,
+                                    GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                clear_page_dirty_for_io(pages[i]);
@@@ -834,7 -839,7 +839,7 @@@ static ssize_t btrfs_file_write(struct 
        unsigned long last_index;
        int will_write;
  
 -      will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
 +      will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
                      (file->f_flags & O_DIRECT));
  
        nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
@@@ -1001,7 -1006,7 +1006,7 @@@ out_nolock
                if (err)
                        num_written = err;
  
 -              if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
 +              if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
                        trans = btrfs_start_transaction(root, 1);
                        ret = btrfs_log_dentry_safe(trans, root,
                                                    file->f_dentry);
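
In file.c, prepare_pages() now takes the range lock with lock_extent_bits()
and a cached_state, then backs off and retries when an ordered extent
overlaps the locked bytes, and btrfs_file_write() switches its sync test from
O_SYNC to O_DSYNC. The overlap condition used above is the usual half-open
interval intersection test; the sketch below restates it with plain C
stand-ins for the kernel's u64.

/* Sketch of the test in prepare_pages(): an ordered extent
 * [file_offset, file_offset + len) intersects the write range
 * [start_pos, last_pos) exactly when both comparisons hold. */
#include <assert.h>
#include <stdint.h>

static int ordered_overlaps(uint64_t file_offset, uint64_t len,
			    uint64_t start_pos, uint64_t last_pos)
{
	return file_offset + len > start_pos && file_offset < last_pos;
}

int main(void)
{
	/* ordered extent [4096, 8192) vs. write range [0, 4096): no overlap */
	assert(!ordered_overlaps(4096, 4096, 0, 4096));
	/* ordered extent [0, 8192) vs. write range [4096, 12288): overlap */
	assert(ordered_overlaps(0, 8192, 4096, 12288));
	return 0;
}
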
diff --combined fs/btrfs/inode.c
index c41db6d45ab6fb2574ecf7a34fae9e43c18a39d3,2a337a09c650d0a8be9dab0971ad973006fce721..02bb099845fd07dcf3566a1adef85c2fffed71ab
@@@ -379,7 -379,8 +379,8 @@@ again
         * change at any time if we discover bad compression ratios.
         */
        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
-           btrfs_test_opt(root, COMPRESS)) {
+           (btrfs_test_opt(root, COMPRESS) ||
+            (BTRFS_I(inode)->force_compress))) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
  
                nr_pages_ret = 0;
  
                /* flag the file so we don't compress in the future */
-               if (!btrfs_test_opt(root, FORCE_COMPRESS))
+               if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
+                   !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+               }
        }
        if (will_compress) {
                *num_added += 1;
@@@ -570,8 -573,8 +573,8 @@@ retry
                        unsigned long nr_written = 0;
  
                        lock_extent(io_tree, async_extent->start,
-                                   async_extent->start +
-                                   async_extent->ram_size - 1, GFP_NOFS);
+                                        async_extent->start +
+                                        async_extent->ram_size - 1, GFP_NOFS);
  
                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
@@@ -1211,7 -1214,8 +1214,8 @@@ static int run_delalloc_range(struct in
        else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
-       else if (!btrfs_test_opt(root, COMPRESS))
+       else if (!btrfs_test_opt(root, COMPRESS) &&
+                !(BTRFS_I(inode)->force_compress))
                ret = cow_file_range(inode, locked_page, start, end,
                                      page_started, nr_written, 1);
        else
@@@ -1508,12 -1512,13 +1512,13 @@@ static noinline int add_pending_csums(s
        return 0;
  }
  
- int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
+ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             struct extent_state **cached_state)
  {
        if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
                WARN_ON(1);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-                                  GFP_NOFS);
+                                  cached_state, GFP_NOFS);
  }
  
  /* see btrfs_writepage_start_hook for details on why this is required */
@@@ -1526,6 -1531,7 +1531,7 @@@ static void btrfs_writepage_fixup_worke
  {
        struct btrfs_writepage_fixup *fixup;
        struct btrfs_ordered_extent *ordered;
+       struct extent_state *cached_state = NULL;
        struct page *page;
        struct inode *inode;
        u64 page_start;
@@@ -1544,7 -1550,8 +1550,8 @@@ again
        page_start = page_offset(page);
        page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
  
-       lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
+       lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
+                        &cached_state, GFP_NOFS);
  
        /* already ordered? We're done */
        if (PagePrivate2(page))
  
        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
-               unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
-                             page_end, GFP_NOFS);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
+                                    page_end, &cached_state, GFP_NOFS);
                unlock_page(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                goto again;
        }
  
-       btrfs_set_extent_delalloc(inode, page_start, page_end);
+       btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
        ClearPageChecked(page);
  out:
-       unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
+                            &cached_state, GFP_NOFS);
  out_page:
        unlock_page(page);
        page_cache_release(page);
@@@ -1691,14 -1699,14 +1699,14 @@@ static int btrfs_finish_ordered_io(stru
        struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+       struct extent_state *cached_state = NULL;
        int compressed = 0;
        int ret;
  
-       ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
+       ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
+                                            end - start + 1);
        if (!ret)
                return 0;
-       ordered_extent = btrfs_lookup_ordered_extent(inode, start);
        BUG_ON(!ordered_extent);
  
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                goto out;
        }
  
-       lock_extent(io_tree, ordered_extent->file_offset,
-                   ordered_extent->file_offset + ordered_extent->len - 1,
-                   GFP_NOFS);
+       lock_extent_bits(io_tree, ordered_extent->file_offset,
+                        ordered_extent->file_offset + ordered_extent->len - 1,
+                        0, &cached_state, GFP_NOFS);
  
        trans = btrfs_join_transaction(root, 1);
  
                                   ordered_extent->len);
                BUG_ON(ret);
        }
-       unlock_extent(io_tree, ordered_extent->file_offset,
-                   ordered_extent->file_offset + ordered_extent->len - 1,
-                   GFP_NOFS);
+       unlock_extent_cached(io_tree, ordered_extent->file_offset,
+                            ordered_extent->file_offset +
+                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);
  
@@@ -2153,7 -2162,7 +2162,7 @@@ void btrfs_orphan_cleanup(struct btrfs_
                found_key.objectid = found_key.offset;
                found_key.type = BTRFS_INODE_ITEM_KEY;
                found_key.offset = 0;
-               inode = btrfs_iget(root->fs_info->sb, &found_key, root);
+               inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
                if (IS_ERR(inode))
                        break;
  
@@@ -3081,6 -3090,7 +3090,7 @@@ static int btrfs_truncate_page(struct a
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
+       struct extent_state *cached_state = NULL;
        char *kaddr;
        u32 blocksize = root->sectorsize;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
@@@ -3127,12 -3137,14 +3137,14 @@@ again
        }
        wait_on_page_writeback(page);
  
-       lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+       lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
+                        GFP_NOFS);
        set_page_extent_mapped(page);
  
        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               unlock_extent_cached(io_tree, page_start, page_end,
+                                    &cached_state, GFP_NOFS);
                unlock_page(page);
                page_cache_release(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                goto again;
        }
  
-       clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
                          EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
-                         GFP_NOFS);
+                         0, 0, &cached_state, GFP_NOFS);
  
-       ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
+       ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+                                       &cached_state);
        if (ret) {
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               unlock_extent_cached(io_tree, page_start, page_end,
+                                    &cached_state, GFP_NOFS);
                goto out_unlock;
        }
  
        }
        ClearPageChecked(page);
        set_page_dirty(page);
-       unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+       unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+                            GFP_NOFS);
  
  out_unlock:
        if (ret)
@@@ -3177,6 -3192,7 +3192,7 @@@ int btrfs_cont_expand(struct inode *ino
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em;
+       struct extent_state *cached_state = NULL;
        u64 mask = root->sectorsize - 1;
        u64 hole_start = (inode->i_size + mask) & ~mask;
        u64 block_end = (size + mask) & ~mask;
                struct btrfs_ordered_extent *ordered;
                btrfs_wait_ordered_range(inode, hole_start,
                                         block_end - hole_start);
-               lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+               lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+                                &cached_state, GFP_NOFS);
                ordered = btrfs_lookup_ordered_extent(inode, hole_start);
                if (!ordered)
                        break;
-               unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+               unlock_extent_cached(io_tree, hole_start, block_end - 1,
+                                    &cached_state, GFP_NOFS);
                btrfs_put_ordered_extent(ordered);
        }
  
                        break;
        }
  
-       unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+       unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
+                            GFP_NOFS);
        return err;
  }
  
@@@ -3639,6 -3658,7 +3658,7 @@@ static noinline void init_btrfs_i(struc
        bi->index_cnt = (u64)-1;
        bi->last_unlink_trans = 0;
        bi->ordered_data_close = 0;
+       bi->force_compress = 0;
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
        extent_io_tree_init(&BTRFS_I(inode)->io_tree,
                             inode->i_mapping, GFP_NOFS);
@@@ -3687,7 -3707,7 +3707,7 @@@ static struct inode *btrfs_iget_locked(
   * Returns in *is_new if the inode was read from disk
   */
  struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-                        struct btrfs_root *root)
+                        struct btrfs_root *root, int *new)
  {
        struct inode *inode;
  
  
                inode_tree_add(inode);
                unlock_new_inode(inode);
+               if (new)
+                       *new = 1;
        }
  
        return inode;
@@@ -3754,7 -3776,7 +3776,7 @@@ struct inode *btrfs_lookup_dentry(struc
                return NULL;
  
        if (location.type == BTRFS_INODE_ITEM_KEY) {
-               inode = btrfs_iget(dir->i_sb, &location, root);
+               inode = btrfs_iget(dir->i_sb, &location, root, NULL);
                return inode;
        }
  
                else
                        inode = new_simple_dir(dir->i_sb, &location, sub_root);
        } else {
-               inode = btrfs_iget(dir->i_sb, &location, sub_root);
+               inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
        }
        srcu_read_unlock(&root->fs_info->subvol_srcu, index);
  
@@@ -3968,7 -3990,7 +3990,7 @@@ err
        return ret;
  }
  
 -int btrfs_write_inode(struct inode *inode, int wait)
 +int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
  {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        if (root->fs_info->btree_inode == inode)
                return 0;
  
 -      if (wait) {
 +      if (wbc->sync_mode == WB_SYNC_ALL) {
                trans = btrfs_join_transaction(root, 1);
                btrfs_set_trans_block_group(trans, inode);
                ret = btrfs_commit_transaction(trans, root);
@@@ -4501,7 -4523,7 +4523,7 @@@ static int btrfs_mkdir(struct inode *di
        err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
        if (err) {
                err = -ENOSPC;
-               goto out_unlock;
+               goto out_fail;
        }
  
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
@@@ -4979,6 -5001,7 +5001,7 @@@ static void btrfs_invalidatepage(struc
  {
        struct extent_io_tree *tree;
        struct btrfs_ordered_extent *ordered;
+       struct extent_state *cached_state = NULL;
        u64 page_start = page_offset(page);
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
  
                btrfs_releasepage(page, GFP_NOFS);
                return;
        }
-       lock_extent(tree, page_start, page_end, GFP_NOFS);
+       lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
+                        GFP_NOFS);
        ordered = btrfs_lookup_ordered_extent(page->mapping->host,
                                           page_offset(page));
        if (ordered) {
                clear_extent_bit(tree, page_start, page_end,
                                 EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
-                                NULL, GFP_NOFS);
+                                &cached_state, GFP_NOFS);
                /*
                 * whoever cleared the private bit is responsible
                 * for the finish_ordered_io
                                                page_start, page_end);
                }
                btrfs_put_ordered_extent(ordered);
-               lock_extent(tree, page_start, page_end, GFP_NOFS);
+               cached_state = NULL;
+               lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
+                                GFP_NOFS);
        }
        clear_extent_bit(tree, page_start, page_end,
                 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-                EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
+                EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
        __btrfs_releasepage(page, GFP_NOFS);
  
        ClearPageChecked(page);
@@@ -5055,6 -5081,7 +5081,7 @@@ int btrfs_page_mkwrite(struct vm_area_s
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
+       struct extent_state *cached_state = NULL;
        char *kaddr;
        unsigned long zero_start;
        loff_t size;
@@@ -5093,7 -5120,8 +5120,8 @@@ again
        }
        wait_on_page_writeback(page);
  
-       lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+       lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
+                        GFP_NOFS);
        set_page_extent_mapped(page);
  
        /*
         */
        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               unlock_extent_cached(io_tree, page_start, page_end,
+                                    &cached_state, GFP_NOFS);
                unlock_page(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
         * is probably a better way to do this, but for now keep consistent with
         * prepare_pages in the normal write path.
         */
-       clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
                          EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
-                         GFP_NOFS);
+                         0, 0, &cached_state, GFP_NOFS);
  
-       ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
+       ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+                                       &cached_state);
        if (ret) {
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               unlock_extent_cached(io_tree, page_start, page_end,
+                                    &cached_state, GFP_NOFS);
                ret = VM_FAULT_SIGBUS;
                btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
                goto out_unlock;
        BTRFS_I(inode)->last_trans = root->fs_info->generation;
        BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
  
-       unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+       unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
  
  out_unlock:
        btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
@@@ -5827,6 -5858,7 +5858,7 @@@ stop_trans
  static long btrfs_fallocate(struct inode *inode, int mode,
                            loff_t offset, loff_t len)
  {
+       struct extent_state *cached_state = NULL;
        u64 cur_offset;
        u64 last_byte;
        u64 alloc_start;
                /* the extent lock is ordered inside the running
                 * transaction
                 */
-               lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                           GFP_NOFS);
+               lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
+                                locked_end, 0, &cached_state, GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            alloc_end - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > alloc_start &&
                    ordered->file_offset < alloc_end) {
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(&BTRFS_I(inode)->io_tree,
-                                     alloc_start, locked_end, GFP_NOFS);
+                       unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                                            alloc_start, locked_end,
+                                            &cached_state, GFP_NOFS);
                        /*
                         * we can't wait on the range with the transaction
                         * running or with the extent lock held
                        break;
                }
        }
-       unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                     GFP_NOFS);
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+                            &cached_state, GFP_NOFS);
  
        btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
                                       alloc_end - alloc_start);
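
Beyond the cached-state plumbing, btrfs_iget() gains an optional
out-parameter reporting whether the inode had to be read from disk, which is
why the call sites in this diff grow a trailing NULL argument. A minimal
sketch of that convention follows; the names and the cache are hypothetical
stand-ins, not the kernel code.

/* Sketch of the optional out-parameter convention used by btrfs_iget():
 * callers that do not care pass NULL, and the flag is written only when
 * the object was actually created. */
#include <stdio.h>

struct object { int id; };

static struct object *lookup_or_create(int id, int *was_new)
{
	static struct object cache = { .id = -1 };

	if (cache.id == id)
		return &cache;     /* cache hit: *was_new stays untouched */

	cache.id = id;             /* "read from disk": report it if asked */
	if (was_new)
		*was_new = 1;
	return &cache;
}

int main(void)
{
	int was_new = 0;

	lookup_or_create(7, NULL);         /* caller that does not care */
	lookup_or_create(7, &was_new);     /* hit: was_new stays 0 */
	printf("second lookup new? %d\n", was_new);
	lookup_or_create(8, &was_new);     /* miss: was_new becomes 1 */
	printf("third lookup new? %d\n", was_new);
	return 0;
}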