/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

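/*
 * drop one reference on a transaction.  When the last reference goes
 * away, sanity check that the transaction is fully idle and free it.
 */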
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
                WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

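/*
 * point root->commit_root at the current root node, dropping the
 * reference held on the old commit root.
 */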
static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
loop:
        if (root->fs_info->trans_no_join) {
                if (!nofail) {
                        spin_unlock(&root->fs_info->trans_lock);
                        return -EBUSY;
                }
        }

        cur_trans = root->fs_info->running_transaction;
        if (cur_trans) {
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
                spin_unlock(&root->fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&root->fs_info->trans_lock);

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&root->fs_info->trans_lock);
        if (root->fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = root->fs_info->running_transaction;
                goto loop;
        }

        atomic_set(&cur_trans->num_writers, 1);
        cur_trans->num_joined = 0;
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->in_commit = 0;
        cur_trans->blocked = 0;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->commit_done = 0;
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.num_entries = 0;
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
        cur_trans->delayed_refs.seq = 1;
        init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
        INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                            root->fs_info->btree_inode->i_mapping);
        root->fs_info->generation++;
        cur_trans->transid = root->fs_info->generation;
        root->fs_info->running_transaction = cur_trans;
        spin_unlock(&root->fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for in_trans_setup usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}

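/*
 * lockless fast path for callers that just need the root recorded in this
 * transaction.  Falls back to record_root_in_trans under the reloc mutex
 * when this looks like the first use of the root in this transaction.
 */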
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}

/*
 * wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           !cur_trans->blocked);
                put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

enum btrfs_trans_type {
        TRANS_START,
        TRANS_JOIN,
        TRANS_USERSPACE,
        TRANS_JOIN_NOLOCK,
};

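/*
 * decide whether the caller should wait for a blocked transaction commit
 * before joining.  Nobody waits during log recovery; otherwise userspace
 * transactions wait, and TRANS_START waits unless an ioctl currently has
 * the transaction open.
 */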
static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

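/*
 * common transaction start code.  Reserves metadata space for num_items
 * items, joins (or creates) the running transaction and hands back a trans
 * handle.  Nested starts reuse the handle stashed in current->journal_info.
 */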
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                                    u64 num_items, int type)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;

        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes);
                if (ret)
                        return ERR_PTR(ret);
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
                if (ret == -EBUSY)
                        wait_current_trans(root);
        } while (ret == -EBUSY);

        if (ret < 0) {
                kmem_cache_free(btrfs_trans_handle_cachep, h);
                return ERR_PTR(ret);
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
}

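/*
 * public wrappers around start_transaction for the different join types.
 * A typical caller pattern (an illustrative sketch only, not code from
 * this file) looks like:
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ...modify roughly one tree item...
 *      btrfs_end_transaction(trans, root);
 */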
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->commit_done);
}

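/*
 * wait for the commit of a specific transid to finish.  transid == 0 means
 * wait for the most recent transaction that is currently committing, if
 * any.  Returns -EINVAL when the requested transid does not exist and was
 * never committed.
 */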
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret;

        ret = 0;
        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                        if (t->transid > transid)
                                break;
                }
                spin_unlock(&root->fs_info->trans_lock);
                ret = -EINVAL;
                if (!cur_trans)
                        goto out;  /* bad transid */
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);

        put_transaction(cur_trans);
        ret = 0;
out:
        return ret;
}

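/*
 * throttle new writers while a commit is blocked, unless an ioctl has the
 * transaction open.
 */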
void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

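/*
 * returns 1 when the global block reserve looks too low to keep going,
 * signalling that the current transaction should be wrapped up soon.
 */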
static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;

        ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
        return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_block_rsv *rsv = trans->block_rsv;
        int updates;

        smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        /*
         * We need to do this in case we're deleting csums so the global block
         * rsv gets used instead of the csum block rsv.
         */
        trans->block_rsv = NULL;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates)
                btrfs_run_delayed_refs(trans, root, updates);

        trans->block_rsv = rsv;

        return should_end_transaction(trans, root);
}

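/*
 * common end-transaction code.  Drops one use on the handle, runs a batch
 * of pending delayed refs, kicks off (or performs) a commit when the
 * transaction has become blocked, and frees the handle on its final use.
 */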
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle, int lock)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;

        if (--trans->use_count) {
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
                smp_wmb();
        }

        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle) {
                        /*
                         * We may race with somebody else here and end up
                         * having to call end_transaction on ourselves again,
                         * so bump our use_count.
                         */
                        trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
                } else {
                        wake_up_process(info->transaction_kthread);
                }
        }

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (throttle)
                btrfs_run_delayed_iputs(root);

        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0, 1);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1, 1);
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0, 0);
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk; it does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, GFP_NOFS);
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT)) {
                clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                  GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops until the cowonly root stops changing while its root
 * pointer is being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                BUG_ON(ret);
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        eb = btrfs_lock_root_node(fs_info->tree_root);
        btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                update_cowonly_root(trans, root);
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots so it gets deleted later
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
        return 0;
}

/*
 * update all the fs tree roots on disk, clearing their transaction tags
 * as we go
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
                                btrfs_unpin_free_ino(root);
                                mutex_unlock(&root->fs_commit_mutex);

                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;
        unsigned long nr;

        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;

        rsv = trans->block_rsv;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                pending->error = -ENOMEM;
                goto fail;
        }

        ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto fail;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
                                                  to_reserve);
                if (ret) {
                        pending->error = ret;
                        goto fail;
                }
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret);
        ret = btrfs_insert_dir_item(trans, parent_root,
                                dentry->d_name.name, dentry->d_name.len,
                                parent_inode, &key,
                                BTRFS_FT_DIR, index);
        BUG_ON(ret);

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        BUG_ON(ret);

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        BUG_ON(ret);

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);
        btrfs_set_lock_blocking(old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        BUG_ON(ret);

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        BUG_ON(ret);
        dput(parent);

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        BUG_ON(IS_ERR(pending->snap));

        btrfs_reloc_post_snapshot(trans, pending);
fail:
        kfree(new_root_item);
        trans->block_rsv = rsv;
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        list_for_each_entry(pending, head, list) {
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
        return 0;
}

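/*
 * copy the current chunk root and tree root pointers into the in-memory
 * super block that will be written at the end of the commit.
 */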
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
}

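/*
 * small helpers for other code paths that need to know whether the running
 * transaction is currently committing or blocking new joins.
 */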
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;

        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;

        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct delayed_work work;
};

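/* worker that runs the actual commit for btrfs_commit_transaction_async */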
static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work.work);

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);
        schedule_delayed_work(&ac->work, 0);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        put_transaction(cur_trans);
        return 0;
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
        int ret;
        int should_grow = 0;
        unsigned long now = get_seconds();
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

        btrfs_run_ordered_operations(root, 0);

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        /* make a pass through all the delayed refs we have so far
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        cur_trans = trans->transaction;
        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        spin_lock(&cur_trans->commit_lock);
        if (cur_trans->in_commit) {
                spin_unlock(&cur_trans->commit_lock);
                atomic_inc(&cur_trans->use_count);
                btrfs_end_transaction(trans, root);

                wait_for_commit(root, cur_trans);

                put_transaction(cur_trans);

                return 0;
        }

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        spin_unlock(&cur_trans->commit_lock);
        wake_up(&root->fs_info->transaction_blocked_wait);

        spin_lock(&root->fs_info->trans_lock);
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        atomic_inc(&prev_trans->use_count);
                        spin_unlock(&root->fs_info->trans_lock);

                        wait_for_commit(root, prev_trans);

                        put_transaction(prev_trans);
                } else {
                        spin_unlock(&root->fs_info->trans_lock);
                }
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
                should_grow = 1;

        do {
                int snap_pending = 0;

                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);

                if (flush_on_commit || snap_pending) {
                        btrfs_start_delalloc_inodes(root, 1);
                        ret = btrfs_wait_ordered_extents(root, 0, 1);
                        BUG_ON(ret);
                }

                ret = btrfs_run_delayed_items(trans, root);
                BUG_ON(ret);

                /*
                 * rename doesn't use btrfs_join_transaction, so, once we
                 * set the transaction to blocked above, we aren't going
                 * to get any new ordered operations.  We can safely run
                 * it here and know for sure that nothing new will be added
                 * to the list
                 */
                btrfs_run_ordered_operations(root, 1);

                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);

                finish_wait(&cur_trans->writer_wait, &wait);
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
         * no_join so make sure to wait for num_writers to == 1 again.
         */
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);

        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
         * extents around in the middle of the commit
         */
        mutex_lock(&root->fs_info->reloc_mutex);

        ret = btrfs_run_delayed_items(trans, root);
        BUG_ON(ret);

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        /*
         * make sure none of the code above managed to slip in a
         * delayed item
         */
        btrfs_assert_delayed_root_empty(root);

        WARN_ON(cur_trans != trans->transaction);

        btrfs_scrub_pause(root);
        /* commit_cowonly_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we allow new joins and
         * let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        BUG_ON(ret);

        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        BUG_ON(ret);

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        switch_commit_root(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);

        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
        }

        memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
               sizeof(*root->fs_info->super_copy));

        trans->transaction->blocked = 0;
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->running_transaction = NULL;
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->reloc_mutex);

        wake_up(&root->fs_info->transaction_wait);

        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root, 0);

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        cur_trans->commit_done = 1;

        root->fs_info->last_trans_committed = cur_trans->transid;

        wake_up(&cur_trans->commit_wait);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        spin_unlock(&root->fs_info->trans_lock);

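        /*
         * two puts here on purpose: one for the reference that kept the
         * transaction alive until this commit (see the use_count of 2 set
         * up in join_transaction), one for this handle's own reference.
         */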
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        list_splice_init(&fs_info->dead_roots, &list);
        spin_unlock(&fs_info->trans_lock);

        while (!list_empty(&list)) {
                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);

                btrfs_kill_all_delayed_nodes(root);

                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        btrfs_drop_snapshot(root, NULL, 0, 0);
                else
                        btrfs_drop_snapshot(root, NULL, 1, 0);
        }
        return 0;
}