linux-imx.git: fs/btrfs/extent-tree.c (commit ef4ce2c026d6a69a8d98bfa7ea3eae7fa9615382)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

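/*
 * A rough sketch of how these modes are used, assuming the usual behaviour
 * of btrfs_update_reserved_bytes() declared below: RESERVE_ALLOC converts
 * space a writer reserved earlier (bytes_may_use) into allocated space,
 * RESERVE_ALLOC_NO_ACCOUNT allocates without touching bytes_may_use, and
 * RESERVE_FREE hands a reservation back without allocating anything.
 */
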
static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

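/*
 * Exclude the space occupied by the super block and its mirrors from a block
 * group's free space accounting.  Assuming the standard layout, the mirrors
 * live at fixed physical offsets (64KiB, 64MiB, 256GiB); btrfs_rmap_block()
 * maps each of them back to whatever logical ranges of this block group they
 * land in.
 */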
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will only be released when
 * the transaction commits.
 */
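/*
 * For example (illustrative numbers): with a range of [0, 100) and a single
 * pinned extent reported as the inclusive range [40, 49], the loop below
 * adds [0, 40) as free space, skips ahead to start = 50, and the tail
 * [50, 100) is added after the loop, for 90 bytes of free space in total.
 */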
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

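/*
 * Worker that populates the free space cache for one block group by walking
 * the extent tree in the commit root.  It wakes anybody waiting on
 * caching_ctl->wait each time roughly 2MiB of new free space has been found,
 * so allocators can start using the block group before caching finishes.
 */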
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched()) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

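/*
 * Start caching free space for a block group.  cache->cached moves from
 * BTRFS_CACHE_NO to BTRFS_CACHE_FAST while the on-disk space cache is tried;
 * a successful fast load goes straight to BTRFS_CACHE_FINISHED, otherwise we
 * either drop back to BTRFS_CACHE_NO (load_cache_only) or enter
 * BTRFS_CACHE_STARTED and hand the work off to the caching thread above.
 */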
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but this could happen I think in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

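/*
 * A minimal usage sketch for the lookup helpers above; the function below is
 * hypothetical and purely illustrative.  block_group_cache_tree_search()
 * takes a reference on whatever it returns, so every successful lookup must
 * be paired with btrfs_put_block_group().
 */
static inline u64 example_block_group_length(struct btrfs_fs_info *fs_info,
                                             u64 bytenr)
{
        struct btrfs_block_group_cache *bg;
        u64 len = 0;

        bg = btrfs_lookup_block_group(fs_info, bytenr);
        if (bg) {
                len = bg->key.offset;   /* key.offset holds the length */
                btrfs_put_block_group(bg);
        }
        return len;
}
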
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

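/*
 * Find a metadata block group that is comfortably under-used, starting at
 * max(search_hint, search_start).  The first pass only considers writable
 * groups and requires used + pinned + reserved to stay below 90% of the
 * group size (factor = 9); if nothing matches, the search wraps around and
 * finally retries with full_search set and factor = 10.
 */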
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/*
 * Simple helper to search for an existing extent item at a given offset; a
 * skinny metadata item starting at the same bytenr also counts as a match.
 */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node may
 * also store the extent flags to set.  This way you can check to see what
 * the reference count and extent flags would be once all of the delayed
 * refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (metadata) {
                key.objectid = bytenr;
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = offset;
        } else {
                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = offset;
        }

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = root->leafsize;
                btrfs_release_path(path);
                goto again;
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase lower level extents' reference counts.  The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both implicit back refs and full back refs for tree blocks consist
 * only of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

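/*
 * Worked example (illustrative numbers): a data extent at bytenr X, written
 * to inode 257 at file offset 0 in subvolume 5, gets the implicit back ref
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * with an extent_data_ref of (5, 257, 0, 1).  If the leaf holding that file
 * extent later stops being referenced by its owner tree and is converted to
 * full back refs per the rules above, the ref becomes keyed by the parent
 * leaf instead:
 *
 *     (X, BTRFS_SHARED_DATA_REF_KEY, leaf bytenr)
 */
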
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

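/*
 * Note that the high crc above is shifted by 31, not 32, so the two crc32c
 * values overlap by one bit and the result is a 63-bit hash.  Different
 * (root, owner, offset) tuples can therefore collide on the same key offset;
 * insert_extent_data_ref() below resolves such collisions by linear probing
 * (key.offset++).
 */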
1100 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1101                                      struct btrfs_extent_data_ref *ref)
1102 {
1103         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1104                                     btrfs_extent_data_ref_objectid(leaf, ref),
1105                                     btrfs_extent_data_ref_offset(leaf, ref));
1106 }
1107
1108 static int match_extent_data_ref(struct extent_buffer *leaf,
1109                                  struct btrfs_extent_data_ref *ref,
1110                                  u64 root_objectid, u64 owner, u64 offset)
1111 {
1112         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1113             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1114             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1115                 return 0;
1116         return 1;
1117 }
1118
1119 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1120                                            struct btrfs_root *root,
1121                                            struct btrfs_path *path,
1122                                            u64 bytenr, u64 parent,
1123                                            u64 root_objectid,
1124                                            u64 owner, u64 offset)
1125 {
1126         struct btrfs_key key;
1127         struct btrfs_extent_data_ref *ref;
1128         struct extent_buffer *leaf;
1129         u32 nritems;
1130         int ret;
1131         int recow;
1132         int err = -ENOENT;
1133
1134         key.objectid = bytenr;
1135         if (parent) {
1136                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1137                 key.offset = parent;
1138         } else {
1139                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1140                 key.offset = hash_extent_data_ref(root_objectid,
1141                                                   owner, offset);
1142         }
1143 again:
1144         recow = 0;
1145         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1146         if (ret < 0) {
1147                 err = ret;
1148                 goto fail;
1149         }
1150
1151         if (parent) {
1152                 if (!ret)
1153                         return 0;
1154 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1155                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1156                 btrfs_release_path(path);
1157                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1158                 if (ret < 0) {
1159                         err = ret;
1160                         goto fail;
1161                 }
1162                 if (!ret)
1163                         return 0;
1164 #endif
1165                 goto fail;
1166         }
1167
1168         leaf = path->nodes[0];
1169         nritems = btrfs_header_nritems(leaf);
1170         while (1) {
1171                 if (path->slots[0] >= nritems) {
1172                         ret = btrfs_next_leaf(root, path);
1173                         if (ret < 0)
1174                                 err = ret;
1175                         if (ret)
1176                                 goto fail;
1177
1178                         leaf = path->nodes[0];
1179                         nritems = btrfs_header_nritems(leaf);
1180                         recow = 1;
1181                 }
1182
1183                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1184                 if (key.objectid != bytenr ||
1185                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1186                         goto fail;
1187
1188                 ref = btrfs_item_ptr(leaf, path->slots[0],
1189                                      struct btrfs_extent_data_ref);
1190
1191                 if (match_extent_data_ref(leaf, ref, root_objectid,
1192                                           owner, offset)) {
1193                         if (recow) {
1194                                 btrfs_release_path(path);
1195                                 goto again;
1196                         }
1197                         err = 0;
1198                         break;
1199                 }
1200                 path->slots[0]++;
1201         }
1202 fail:
1203         return err;
1204 }
1205
1206 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1207                                            struct btrfs_root *root,
1208                                            struct btrfs_path *path,
1209                                            u64 bytenr, u64 parent,
1210                                            u64 root_objectid, u64 owner,
1211                                            u64 offset, int refs_to_add)
1212 {
1213         struct btrfs_key key;
1214         struct extent_buffer *leaf;
1215         u32 size;
1216         u32 num_refs;
1217         int ret;
1218
1219         key.objectid = bytenr;
1220         if (parent) {
1221                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1222                 key.offset = parent;
1223                 size = sizeof(struct btrfs_shared_data_ref);
1224         } else {
1225                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1226                 key.offset = hash_extent_data_ref(root_objectid,
1227                                                   owner, offset);
1228                 size = sizeof(struct btrfs_extent_data_ref);
1229         }
1230
1231         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1232         if (ret && ret != -EEXIST)
1233                 goto fail;
1234
1235         leaf = path->nodes[0];
1236         if (parent) {
1237                 struct btrfs_shared_data_ref *ref;
1238                 ref = btrfs_item_ptr(leaf, path->slots[0],
1239                                      struct btrfs_shared_data_ref);
1240                 if (ret == 0) {
1241                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1242                 } else {
1243                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1244                         num_refs += refs_to_add;
1245                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1246                 }
1247         } else {
1248                 struct btrfs_extent_data_ref *ref;
1249                 while (ret == -EEXIST) {
1250                         ref = btrfs_item_ptr(leaf, path->slots[0],
1251                                              struct btrfs_extent_data_ref);
1252                         if (match_extent_data_ref(leaf, ref, root_objectid,
1253                                                   owner, offset))
1254                                 break;
1255                         btrfs_release_path(path);
1256                         key.offset++;
1257                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1258                                                       size);
1259                         if (ret && ret != -EEXIST)
1260                                 goto fail;
1261
1262                         leaf = path->nodes[0];
1263                 }
1264                 ref = btrfs_item_ptr(leaf, path->slots[0],
1265                                      struct btrfs_extent_data_ref);
1266                 if (ret == 0) {
1267                         btrfs_set_extent_data_ref_root(leaf, ref,
1268                                                        root_objectid);
1269                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1270                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1271                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1272                 } else {
1273                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1274                         num_refs += refs_to_add;
1275                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1276                 }
1277         }
1278         btrfs_mark_buffer_dirty(leaf);
1279         ret = 0;
1280 fail:
1281         btrfs_release_path(path);
1282         return ret;
1283 }
1284
1285 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1286                                            struct btrfs_root *root,
1287                                            struct btrfs_path *path,
1288                                            int refs_to_drop)
1289 {
1290         struct btrfs_key key;
1291         struct btrfs_extent_data_ref *ref1 = NULL;
1292         struct btrfs_shared_data_ref *ref2 = NULL;
1293         struct extent_buffer *leaf;
1294         u32 num_refs = 0;
1295         int ret = 0;
1296
1297         leaf = path->nodes[0];
1298         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1299
1300         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1301                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1302                                       struct btrfs_extent_data_ref);
1303                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1304         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1305                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1306                                       struct btrfs_shared_data_ref);
1307                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1308 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1309         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1310                 struct btrfs_extent_ref_v0 *ref0;
1311                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1312                                       struct btrfs_extent_ref_v0);
1313                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1314 #endif
1315         } else {
1316                 BUG();
1317         }
1318
1319         BUG_ON(num_refs < refs_to_drop);
1320         num_refs -= refs_to_drop;
1321
1322         if (num_refs == 0) {
1323                 ret = btrfs_del_item(trans, root, path);
1324         } else {
1325                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1326                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1327                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1328                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1329 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1330                 else {
1331                         struct btrfs_extent_ref_v0 *ref0;
1332                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1333                                         struct btrfs_extent_ref_v0);
1334                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1335                 }
1336 #endif
1337                 btrfs_mark_buffer_dirty(leaf);
1338         }
1339         return ret;
1340 }
1341
1342 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1343                                           struct btrfs_path *path,
1344                                           struct btrfs_extent_inline_ref *iref)
1345 {
1346         struct btrfs_key key;
1347         struct extent_buffer *leaf;
1348         struct btrfs_extent_data_ref *ref1;
1349         struct btrfs_shared_data_ref *ref2;
1350         u32 num_refs = 0;
1351
1352         leaf = path->nodes[0];
1353         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1354         if (iref) {
1355                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1356                     BTRFS_EXTENT_DATA_REF_KEY) {
1357                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1358                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1359                 } else {
1360                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1361                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1362                 }
1363         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1364                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1365                                       struct btrfs_extent_data_ref);
1366                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1367         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1368                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1369                                       struct btrfs_shared_data_ref);
1370                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1371 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1372         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1373                 struct btrfs_extent_ref_v0 *ref0;
1374                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1375                                       struct btrfs_extent_ref_v0);
1376                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1377 #endif
1378         } else {
1379                 WARN_ON(1);
1380         }
1381         return num_refs;
1382 }
1383
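     /*
      * look up the keyed backref item for a tree block: a SHARED_BLOCK_REF
      * keyed on the parent block when parent is set, otherwise a
      * TREE_BLOCK_REF keyed on the owning root.  Returns -ENOENT if the
      * item does not exist.
      */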
1384 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1385                                           struct btrfs_root *root,
1386                                           struct btrfs_path *path,
1387                                           u64 bytenr, u64 parent,
1388                                           u64 root_objectid)
1389 {
1390         struct btrfs_key key;
1391         int ret;
1392
1393         key.objectid = bytenr;
1394         if (parent) {
1395                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1396                 key.offset = parent;
1397         } else {
1398                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1399                 key.offset = root_objectid;
1400         }
1401
1402         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1403         if (ret > 0)
1404                 ret = -ENOENT;
1405 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1406         if (ret == -ENOENT && parent) {
1407                 btrfs_release_path(path);
1408                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1409                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1410                 if (ret > 0)
1411                         ret = -ENOENT;
1412         }
1413 #endif
1414         return ret;
1415 }
1416
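     /*
      * insert an empty keyed backref item for a tree block.  The key layout
      * mirrors lookup_tree_block_ref; tree block backrefs carry no payload,
      * the key itself is the reference.
      */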
1417 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1418                                           struct btrfs_root *root,
1419                                           struct btrfs_path *path,
1420                                           u64 bytenr, u64 parent,
1421                                           u64 root_objectid)
1422 {
1423         struct btrfs_key key;
1424         int ret;
1425
1426         key.objectid = bytenr;
1427         if (parent) {
1428                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1429                 key.offset = parent;
1430         } else {
1431                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1432                 key.offset = root_objectid;
1433         }
1434
1435         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1436         btrfs_release_path(path);
1437         return ret;
1438 }
1439
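     /*
      * pick the backref key type for a (parent, owner) pair.  Owners below
      * BTRFS_FIRST_FREE_OBJECTID are tree blocks, everything else is data:
      *
      *                   parent == 0                parent > 0
      *  tree block   BTRFS_TREE_BLOCK_REF_KEY  BTRFS_SHARED_BLOCK_REF_KEY
      *  data extent  BTRFS_EXTENT_DATA_REF_KEY BTRFS_SHARED_DATA_REF_KEY
      */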
1440 static inline int extent_ref_type(u64 parent, u64 owner)
1441 {
1442         int type;
1443         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1444                 if (parent > 0)
1445                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1446                 else
1447                         type = BTRFS_TREE_BLOCK_REF_KEY;
1448         } else {
1449                 if (parent > 0)
1450                         type = BTRFS_SHARED_DATA_REF_KEY;
1451                 else
1452                         type = BTRFS_EXTENT_DATA_REF_KEY;
1453         }
1454         return type;
1455 }
1456
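     /*
      * find the key that follows the current path position, walking up the
      * path until a node with a next slot is found.  Returns 0 and fills in
      * *key on success, 1 if nothing follows in the tree.
      */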
1457 static int find_next_key(struct btrfs_path *path, int level,
1458                          struct btrfs_key *key)
1459
1460 {
1461         for (; level < BTRFS_MAX_LEVEL; level++) {
1462                 if (!path->nodes[level])
1463                         break;
1464                 if (path->slots[level] + 1 >=
1465                     btrfs_header_nritems(path->nodes[level]))
1466                         continue;
1467                 if (level == 0)
1468                         btrfs_item_key_to_cpu(path->nodes[level], key,
1469                                               path->slots[level] + 1);
1470                 else
1471                         btrfs_node_key_to_cpu(path->nodes[level], key,
1472                                               path->slots[level] + 1);
1473                 return 0;
1474         }
1475         return 1;
1476 }
1477
1478 /*
1479  * look for inline back ref. if back ref is found, *ref_ret is set
1480  * to the address of inline back ref, and 0 is returned.
1481  *
1482  * if back ref isn't found, *ref_ret is set to the address where it
1483  * should be inserted, and -ENOENT is returned.
1484  *
1485  * if insert is true and there are too many inline back refs, the path
1486  * points to the extent item, and -EAGAIN is returned.
1487  *
1488  * NOTE: inline back refs are ordered in the same way that back ref
1489  *       items in the tree are ordered.
1490  */
1491 static noinline_for_stack
1492 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1493                                  struct btrfs_root *root,
1494                                  struct btrfs_path *path,
1495                                  struct btrfs_extent_inline_ref **ref_ret,
1496                                  u64 bytenr, u64 num_bytes,
1497                                  u64 parent, u64 root_objectid,
1498                                  u64 owner, u64 offset, int insert)
1499 {
1500         struct btrfs_key key;
1501         struct extent_buffer *leaf;
1502         struct btrfs_extent_item *ei;
1503         struct btrfs_extent_inline_ref *iref;
1504         u64 flags;
1505         u64 item_size;
1506         unsigned long ptr;
1507         unsigned long end;
1508         int extra_size;
1509         int type;
1510         int want;
1511         int ret;
1512         int err = 0;
1513         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1514                                                  SKINNY_METADATA);
1515
1516         key.objectid = bytenr;
1517         key.type = BTRFS_EXTENT_ITEM_KEY;
1518         key.offset = num_bytes;
1519
1520         want = extent_ref_type(parent, owner);
1521         if (insert) {
1522                 extra_size = btrfs_extent_inline_ref_size(want);
1523                 path->keep_locks = 1;
1524         } else
1525                 extra_size = -1;
1526
1527         /*
1528          * For tree blocks, owner is the level of the block, which is
1529          * exactly what a skinny metadata key stores in its offset.
1530          */
1531         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1532                 key.type = BTRFS_METADATA_ITEM_KEY;
1533                 key.offset = owner;
1534         }
1535
1536 again:
1537         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1538         if (ret < 0) {
1539                 err = ret;
1540                 goto out;
1541         }
1542
1543         /*
1544          * We may be a newly converted file system which still has the old fat
1545          * extent entries for metadata, so try and see if we have one of those.
1546          */
1547         if (ret > 0 && skinny_metadata) {
1548                 skinny_metadata = false;
1549                 if (path->slots[0]) {
1550                         path->slots[0]--;
1551                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1552                                               path->slots[0]);
1553                         if (key.objectid == bytenr &&
1554                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1555                             key.offset == num_bytes)
1556                                 ret = 0;
1557                 }
1558                 if (ret) {
1559                         key.type = BTRFS_EXTENT_ITEM_KEY;
1560                         key.offset = num_bytes;
1561                         btrfs_release_path(path);
1562                         goto again;
1563                 }
1564         }
1565
1566         if (ret && !insert) {
1567                 err = -ENOENT;
1568                 goto out;
1569         } else if (ret) {
1570                 err = -EIO;
1571                 WARN_ON(1);
1572                 goto out;
1573         }
1574
1575         leaf = path->nodes[0];
1576         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1577 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1578         if (item_size < sizeof(*ei)) {
1579                 if (!insert) {
1580                         err = -ENOENT;
1581                         goto out;
1582                 }
1583                 ret = convert_extent_item_v0(trans, root, path, owner,
1584                                              extra_size);
1585                 if (ret < 0) {
1586                         err = ret;
1587                         goto out;
1588                 }
1589                 leaf = path->nodes[0];
1590                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1591         }
1592 #endif
1593         BUG_ON(item_size < sizeof(*ei));
1594
1595         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1596         flags = btrfs_extent_flags(leaf, ei);
1597
1598         ptr = (unsigned long)(ei + 1);
1599         end = (unsigned long)ei + item_size;
1600
1601         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1602                 ptr += sizeof(struct btrfs_tree_block_info);
1603                 BUG_ON(ptr > end);
1604         }
1605
1606         err = -ENOENT;
1607         while (1) {
1608                 if (ptr >= end) {
1609                         WARN_ON(ptr > end);
1610                         break;
1611                 }
1612                 iref = (struct btrfs_extent_inline_ref *)ptr;
1613                 type = btrfs_extent_inline_ref_type(leaf, iref);
1614                 if (want < type)
1615                         break;
1616                 if (want > type) {
1617                         ptr += btrfs_extent_inline_ref_size(type);
1618                         continue;
1619                 }
1620
1621                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1622                         struct btrfs_extent_data_ref *dref;
1623                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1624                         if (match_extent_data_ref(leaf, dref, root_objectid,
1625                                                   owner, offset)) {
1626                                 err = 0;
1627                                 break;
1628                         }
1629                         if (hash_extent_data_ref_item(leaf, dref) <
1630                             hash_extent_data_ref(root_objectid, owner, offset))
1631                                 break;
1632                 } else {
1633                         u64 ref_offset;
1634                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1635                         if (parent > 0) {
1636                                 if (parent == ref_offset) {
1637                                         err = 0;
1638                                         break;
1639                                 }
1640                                 if (ref_offset < parent)
1641                                         break;
1642                         } else {
1643                                 if (root_objectid == ref_offset) {
1644                                         err = 0;
1645                                         break;
1646                                 }
1647                                 if (ref_offset < root_objectid)
1648                                         break;
1649                         }
1650                 }
1651                 ptr += btrfs_extent_inline_ref_size(type);
1652         }
1653         if (err == -ENOENT && insert) {
1654                 if (item_size + extra_size >=
1655                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1656                         err = -EAGAIN;
1657                         goto out;
1658                 }
1659                 /*
1660                  * To add a new inline back ref, we have to make sure
1661                  * there is no corresponding back ref item.
1662                  * For simplicity, we just do not add a new inline back
1663                  * ref if there is any kind of item for this block.
1664                  */
1665                 if (find_next_key(path, 0, &key) == 0 &&
1666                     key.objectid == bytenr &&
1667                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1668                         err = -EAGAIN;
1669                         goto out;
1670                 }
1671         }
1672         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1673 out:
1674         if (insert) {
1675                 path->keep_locks = 0;
1676                 btrfs_unlock_up_safe(path, 1);
1677         }
1678         return err;
1679 }
1680
1681 /*
1682  * helper to add new inline back ref
1683  */
1684 static noinline_for_stack
1685 void setup_inline_extent_backref(struct btrfs_root *root,
1686                                  struct btrfs_path *path,
1687                                  struct btrfs_extent_inline_ref *iref,
1688                                  u64 parent, u64 root_objectid,
1689                                  u64 owner, u64 offset, int refs_to_add,
1690                                  struct btrfs_delayed_extent_op *extent_op)
1691 {
1692         struct extent_buffer *leaf;
1693         struct btrfs_extent_item *ei;
1694         unsigned long ptr;
1695         unsigned long end;
1696         unsigned long item_offset;
1697         u64 refs;
1698         int size;
1699         int type;
1700
1701         leaf = path->nodes[0];
1702         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1703         item_offset = (unsigned long)iref - (unsigned long)ei;
1704
1705         type = extent_ref_type(parent, owner);
1706         size = btrfs_extent_inline_ref_size(type);
1707
1708         btrfs_extend_item(root, path, size);
1709
1710         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1711         refs = btrfs_extent_refs(leaf, ei);
1712         refs += refs_to_add;
1713         btrfs_set_extent_refs(leaf, ei, refs);
1714         if (extent_op)
1715                 __run_delayed_extent_op(extent_op, leaf, ei);
1716
1717         ptr = (unsigned long)ei + item_offset;
1718         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1719         if (ptr < end - size)
1720                 memmove_extent_buffer(leaf, ptr + size, ptr,
1721                                       end - size - ptr);
1722
1723         iref = (struct btrfs_extent_inline_ref *)ptr;
1724         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1725         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1726                 struct btrfs_extent_data_ref *dref;
1727                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1728                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1729                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1730                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1731                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1732         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1733                 struct btrfs_shared_data_ref *sref;
1734                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1735                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1736                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1737         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1738                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1739         } else {
1740                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1741         }
1742         btrfs_mark_buffer_dirty(leaf);
1743 }
1744
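     /*
      * look up a backref for the given extent: try the inline ref inside
      * the extent item first, then fall back to the keyed backref items.
      * *ref_ret is only set when an inline ref is found.
      */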
1745 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1746                                  struct btrfs_root *root,
1747                                  struct btrfs_path *path,
1748                                  struct btrfs_extent_inline_ref **ref_ret,
1749                                  u64 bytenr, u64 num_bytes, u64 parent,
1750                                  u64 root_objectid, u64 owner, u64 offset)
1751 {
1752         int ret;
1753
1754         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1755                                            bytenr, num_bytes, parent,
1756                                            root_objectid, owner, offset, 0);
1757         if (ret != -ENOENT)
1758                 return ret;
1759
1760         btrfs_release_path(path);
1761         *ref_ret = NULL;
1762
1763         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1764                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1765                                             root_objectid);
1766         } else {
1767                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1768                                              root_objectid, owner, offset);
1769         }
1770         return ret;
1771 }
1772
1773 /*
1774  * helper to update/remove inline back ref
1775  */
1776 static noinline_for_stack
1777 void update_inline_extent_backref(struct btrfs_root *root,
1778                                   struct btrfs_path *path,
1779                                   struct btrfs_extent_inline_ref *iref,
1780                                   int refs_to_mod,
1781                                   struct btrfs_delayed_extent_op *extent_op)
1782 {
1783         struct extent_buffer *leaf;
1784         struct btrfs_extent_item *ei;
1785         struct btrfs_extent_data_ref *dref = NULL;
1786         struct btrfs_shared_data_ref *sref = NULL;
1787         unsigned long ptr;
1788         unsigned long end;
1789         u32 item_size;
1790         int size;
1791         int type;
1792         u64 refs;
1793
1794         leaf = path->nodes[0];
1795         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1796         refs = btrfs_extent_refs(leaf, ei);
1797         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1798         refs += refs_to_mod;
1799         btrfs_set_extent_refs(leaf, ei, refs);
1800         if (extent_op)
1801                 __run_delayed_extent_op(extent_op, leaf, ei);
1802
1803         type = btrfs_extent_inline_ref_type(leaf, iref);
1804
1805         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1806                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1807                 refs = btrfs_extent_data_ref_count(leaf, dref);
1808         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1809                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1810                 refs = btrfs_shared_data_ref_count(leaf, sref);
1811         } else {
1812                 refs = 1;
1813                 BUG_ON(refs_to_mod != -1);
1814         }
1815
1816         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1817         refs += refs_to_mod;
1818
1819         if (refs > 0) {
1820                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1821                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1822                 else
1823                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1824         } else {
1825                 size =  btrfs_extent_inline_ref_size(type);
1826                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1827                 ptr = (unsigned long)iref;
1828                 end = (unsigned long)ei + item_size;
1829                 if (ptr + size < end)
1830                         memmove_extent_buffer(leaf, ptr, ptr + size,
1831                                               end - ptr - size);
1832                 item_size -= size;
1833                 btrfs_truncate_item(root, path, item_size, 1);
1834         }
1835         btrfs_mark_buffer_dirty(leaf);
1836 }
1837
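     /*
      * add an inline backref: bump the count of an existing inline ref, or
      * carve out a new one on -ENOENT.  -EAGAIN from the lookup (no room
      * left in the extent item, or a keyed item already exists for this
      * block) is passed through so the caller can fall back to a keyed
      * backref item.
      */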
1838 static noinline_for_stack
1839 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1840                                  struct btrfs_root *root,
1841                                  struct btrfs_path *path,
1842                                  u64 bytenr, u64 num_bytes, u64 parent,
1843                                  u64 root_objectid, u64 owner,
1844                                  u64 offset, int refs_to_add,
1845                                  struct btrfs_delayed_extent_op *extent_op)
1846 {
1847         struct btrfs_extent_inline_ref *iref;
1848         int ret;
1849
1850         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1851                                            bytenr, num_bytes, parent,
1852                                            root_objectid, owner, offset, 1);
1853         if (ret == 0) {
1854                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1855                 update_inline_extent_backref(root, path, iref,
1856                                              refs_to_add, extent_op);
1857         } else if (ret == -ENOENT) {
1858                 setup_inline_extent_backref(root, path, iref, parent,
1859                                             root_objectid, owner, offset,
1860                                             refs_to_add, extent_op);
1861                 ret = 0;
1862         }
1863         return ret;
1864 }
1865
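     /*
      * insert a keyed backref item, dispatching on the owner: tree blocks
      * get a block ref (always a single reference), data extents a data ref
      */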
1866 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1867                                  struct btrfs_root *root,
1868                                  struct btrfs_path *path,
1869                                  u64 bytenr, u64 parent, u64 root_objectid,
1870                                  u64 owner, u64 offset, int refs_to_add)
1871 {
1872         int ret;
1873         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1874                 BUG_ON(refs_to_add != 1);
1875                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1876                                             parent, root_objectid);
1877         } else {
1878                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1879                                              parent, root_objectid,
1880                                              owner, offset, refs_to_add);
1881         }
1882         return ret;
1883 }
1884
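     /*
      * drop refs_to_drop references, either by shrinking or removing an
      * inline ref, decrementing a keyed data ref, or deleting the backref
      * item outright for tree blocks
      */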
1885 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1886                                  struct btrfs_root *root,
1887                                  struct btrfs_path *path,
1888                                  struct btrfs_extent_inline_ref *iref,
1889                                  int refs_to_drop, int is_data)
1890 {
1891         int ret = 0;
1892
1893         BUG_ON(!is_data && refs_to_drop != 1);
1894         if (iref) {
1895                 update_inline_extent_backref(root, path, iref,
1896                                              -refs_to_drop, NULL);
1897         } else if (is_data) {
1898                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1899         } else {
1900                 ret = btrfs_del_item(trans, root, path);
1901         }
1902         return ret;
1903 }
1904
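     /* bytenr and len are in bytes; blkdev_issue_discard expects 512-byte sectors */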
1905 static int btrfs_issue_discard(struct block_device *bdev,
1906                                 u64 start, u64 len)
1907 {
1908         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1909 }
1910
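     /*
      * discard the byte range on every stripe backing it.  A device that
      * does not support discard is skipped, EOPNOTSUPP is ignored, and
      * *actual_bytes reports how much was really discarded.
      */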
1911 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1912                                 u64 num_bytes, u64 *actual_bytes)
1913 {
1914         int ret;
1915         u64 discarded_bytes = 0;
1916         struct btrfs_bio *bbio = NULL;
1917
1918
1919         /* Tell the block device(s) that the sectors can be discarded */
1920         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1921                               bytenr, &num_bytes, &bbio, 0);
1922         /* Error condition is -ENOMEM */
1923         if (!ret) {
1924                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1925                 int i;
1926
1927
1928                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1929                         if (!stripe->dev->can_discard)
1930                                 continue;
1931
1932                         ret = btrfs_issue_discard(stripe->dev->bdev,
1933                                                   stripe->physical,
1934                                                   stripe->length);
1935                         if (!ret)
1936                                 discarded_bytes += stripe->length;
1937                         else if (ret != -EOPNOTSUPP)
1938                                 break; /* logic error, -ENOMEM, or an unexpected -EIO */
1939
1940                         /*
1941                          * If we get back EOPNOTSUPP for some reason, just
1942                          * ignore the return value so we don't screw up
1943                          * people calling discard_extent.
1944                          */
1945                         ret = 0;
1946                 }
1947                 kfree(bbio);
1948         }
1949
1950         if (actual_bytes)
1951                 *actual_bytes = discarded_bytes;
1952
1953
1954         if (ret == -EOPNOTSUPP)
1955                 ret = 0;
1956         return ret;
1957 }
1958
1959 /* Can return -ENOMEM */
1960 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1961                          struct btrfs_root *root,
1962                          u64 bytenr, u64 num_bytes, u64 parent,
1963                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1964 {
1965         int ret;
1966         struct btrfs_fs_info *fs_info = root->fs_info;
1967
1968         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1969                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1970
1971         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1972                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1973                                         num_bytes,
1974                                         parent, root_objectid, (int)owner,
1975                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1976         } else {
1977                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1978                                         num_bytes,
1979                                         parent, root_objectid, owner, offset,
1980                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1981         }
1982         return ret;
1983 }
1984
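     /*
      * add refs_to_add references to an existing extent.  The inline backref
      * is tried first; if the extent item has no room left (-EAGAIN), the
      * refcount on the extent item is still bumped and a separate keyed
      * backref item is inserted instead.
      */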
1985 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1986                                   struct btrfs_root *root,
1987                                   u64 bytenr, u64 num_bytes,
1988                                   u64 parent, u64 root_objectid,
1989                                   u64 owner, u64 offset, int refs_to_add,
1990                                   struct btrfs_delayed_extent_op *extent_op)
1991 {
1992         struct btrfs_path *path;
1993         struct extent_buffer *leaf;
1994         struct btrfs_extent_item *item;
1995         u64 refs;
1996         int ret;
1997         int err = 0;
1998
1999         path = btrfs_alloc_path();
2000         if (!path)
2001                 return -ENOMEM;
2002
2003         path->reada = 1;
2004         path->leave_spinning = 1;
2005         /* this will set up the path even if it fails to insert the back ref */
2006         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
2007                                            path, bytenr, num_bytes, parent,
2008                                            root_objectid, owner, offset,
2009                                            refs_to_add, extent_op);
2010         if (ret == 0)
2011                 goto out;
2012
2013         if (ret != -EAGAIN) {
2014                 err = ret;
2015                 goto out;
2016         }
2017
2018         leaf = path->nodes[0];
2019         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2020         refs = btrfs_extent_refs(leaf, item);
2021         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2022         if (extent_op)
2023                 __run_delayed_extent_op(extent_op, leaf, item);
2024
2025         btrfs_mark_buffer_dirty(leaf);
2026         btrfs_release_path(path);
2027
2028         path->reada = 1;
2029         path->leave_spinning = 1;
2030
2031         /* now insert the actual backref */
2032         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2033                                     path, bytenr, parent, root_objectid,
2034                                     owner, offset, refs_to_add);
2035         if (ret)
2036                 btrfs_abort_transaction(trans, root, ret);
2037 out:
2038         btrfs_free_path(path);
2039         return err;
2040 }
2041
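     /*
      * run one delayed ref against a data extent: allocate the reserved
      * extent along with its first backref, add a backref, or free one
      */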
2042 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2043                                 struct btrfs_root *root,
2044                                 struct btrfs_delayed_ref_node *node,
2045                                 struct btrfs_delayed_extent_op *extent_op,
2046                                 int insert_reserved)
2047 {
2048         int ret = 0;
2049         struct btrfs_delayed_data_ref *ref;
2050         struct btrfs_key ins;
2051         u64 parent = 0;
2052         u64 ref_root = 0;
2053         u64 flags = 0;
2054
2055         ins.objectid = node->bytenr;
2056         ins.offset = node->num_bytes;
2057         ins.type = BTRFS_EXTENT_ITEM_KEY;
2058
2059         ref = btrfs_delayed_node_to_data_ref(node);
2060         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2061                 parent = ref->parent;
2062         else
2063                 ref_root = ref->root;
2064
2065         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2066                 if (extent_op)
2067                         flags |= extent_op->flags_to_set;
2068                 ret = alloc_reserved_file_extent(trans, root,
2069                                                  parent, ref_root, flags,
2070                                                  ref->objectid, ref->offset,
2071                                                  &ins, node->ref_mod);
2072         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2073                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2074                                              node->num_bytes, parent,
2075                                              ref_root, ref->objectid,
2076                                              ref->offset, node->ref_mod,
2077                                              extent_op);
2078         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2079                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2080                                           node->num_bytes, parent,
2081                                           ref_root, ref->objectid,
2082                                           ref->offset, node->ref_mod,
2083                                           extent_op);
2084         } else {
2085                 BUG();
2086         }
2087         return ret;
2088 }
2089
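     /*
      * fold a delayed extent op into the extent item: set the recorded
      * flags and/or update the first key stored in the tree block info
      */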
2090 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2091                                     struct extent_buffer *leaf,
2092                                     struct btrfs_extent_item *ei)
2093 {
2094         u64 flags = btrfs_extent_flags(leaf, ei);
2095         if (extent_op->update_flags) {
2096                 flags |= extent_op->flags_to_set;
2097                 btrfs_set_extent_flags(leaf, ei, flags);
2098         }
2099
2100         if (extent_op->update_key) {
2101                 struct btrfs_tree_block_info *bi;
2102                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2103                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2104                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2105         }
2106 }
2107
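     /*
      * write a delayed extent op into the extent item on disk.  For
      * metadata the skinny METADATA_ITEM key is tried first, falling back
      * to the old fat EXTENT_ITEM key for converted filesystems.
      */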
2108 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2109                                  struct btrfs_root *root,
2110                                  struct btrfs_delayed_ref_node *node,
2111                                  struct btrfs_delayed_extent_op *extent_op)
2112 {
2113         struct btrfs_key key;
2114         struct btrfs_path *path;
2115         struct btrfs_extent_item *ei;
2116         struct extent_buffer *leaf;
2117         u32 item_size;
2118         int ret;
2119         int err = 0;
2120         int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2121                         node->type == BTRFS_SHARED_BLOCK_REF_KEY);
2122
2123         if (trans->aborted)
2124                 return 0;
2125
2126         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2127                 metadata = 0;
2128
2129         path = btrfs_alloc_path();
2130         if (!path)
2131                 return -ENOMEM;
2132
2133         key.objectid = node->bytenr;
2134
2135         if (metadata) {
2136                 struct btrfs_delayed_tree_ref *tree_ref;
2137
2138                 tree_ref = btrfs_delayed_node_to_tree_ref(node);
2139                 key.type = BTRFS_METADATA_ITEM_KEY;
2140                 key.offset = tree_ref->level;
2141         } else {
2142                 key.type = BTRFS_EXTENT_ITEM_KEY;
2143                 key.offset = node->num_bytes;
2144         }
2145
2146 again:
2147         path->reada = 1;
2148         path->leave_spinning = 1;
2149         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2150                                 path, 0, 1);
2151         if (ret < 0) {
2152                 err = ret;
2153                 goto out;
2154         }
2155         if (ret > 0) {
2156                 if (metadata) {
2157                         btrfs_release_path(path);
2158                         metadata = 0;
2159
2160                         key.offset = node->num_bytes;
2161                         key.type = BTRFS_EXTENT_ITEM_KEY;
2162                         goto again;
2163                 }
2164                 err = -EIO;
2165                 goto out;
2166         }
2167
2168         leaf = path->nodes[0];
2169         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2170 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2171         if (item_size < sizeof(*ei)) {
2172                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2173                                              path, (u64)-1, 0);
2174                 if (ret < 0) {
2175                         err = ret;
2176                         goto out;
2177                 }
2178                 leaf = path->nodes[0];
2179                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2180         }
2181 #endif
2182         BUG_ON(item_size < sizeof(*ei));
2183         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2184         __run_delayed_extent_op(extent_op, leaf, ei);
2185
2186         btrfs_mark_buffer_dirty(leaf);
2187 out:
2188         btrfs_free_path(path);
2189         return err;
2190 }
2191
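     /*
      * run one delayed ref against a tree block: allocate the reserved
      * block, add a backref, or free one.  Tree block refs always carry a
      * ref_mod of 1.
      */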
2192 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2193                                 struct btrfs_root *root,
2194                                 struct btrfs_delayed_ref_node *node,
2195                                 struct btrfs_delayed_extent_op *extent_op,
2196                                 int insert_reserved)
2197 {
2198         int ret = 0;
2199         struct btrfs_delayed_tree_ref *ref;
2200         struct btrfs_key ins;
2201         u64 parent = 0;
2202         u64 ref_root = 0;
2203         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2204                                                  SKINNY_METADATA);
2205
2206         ref = btrfs_delayed_node_to_tree_ref(node);
2207         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2208                 parent = ref->parent;
2209         else
2210                 ref_root = ref->root;
2211
2212         ins.objectid = node->bytenr;
2213         if (skinny_metadata) {
2214                 ins.offset = ref->level;
2215                 ins.type = BTRFS_METADATA_ITEM_KEY;
2216         } else {
2217                 ins.offset = node->num_bytes;
2218                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2219         }
2220
2221         BUG_ON(node->ref_mod != 1);
2222         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2223                 BUG_ON(!extent_op || !extent_op->update_flags);
2224                 ret = alloc_reserved_tree_block(trans, root,
2225                                                 parent, ref_root,
2226                                                 extent_op->flags_to_set,
2227                                                 &extent_op->key,
2228                                                 ref->level, &ins);
2229         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2230                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2231                                              node->num_bytes, parent, ref_root,
2232                                              ref->level, 0, 1, extent_op);
2233         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2234                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2235                                           node->num_bytes, parent, ref_root,
2236                                           ref->level, 0, 1, extent_op);
2237         } else {
2238                 BUG();
2239         }
2240         return ret;
2241 }
2242
2243 /* helper function to actually process a single delayed ref entry */
2244 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2245                                struct btrfs_root *root,
2246                                struct btrfs_delayed_ref_node *node,
2247                                struct btrfs_delayed_extent_op *extent_op,
2248                                int insert_reserved)
2249 {
2250         int ret = 0;
2251
2252         if (trans->aborted)
2253                 return 0;
2254
2255         if (btrfs_delayed_ref_is_head(node)) {
2256                 struct btrfs_delayed_ref_head *head;
2257                 /*
2258                  * we've hit the end of the chain and we were supposed
2259                  * to insert this extent into the tree.  But, it got
2260                  * deleted before we ever needed to insert it, so all
2261                  * we have to do is clean up the accounting
2262                  */
2263                 BUG_ON(extent_op);
2264                 head = btrfs_delayed_node_to_head(node);
2265                 if (insert_reserved) {
2266                         btrfs_pin_extent(root, node->bytenr,
2267                                          node->num_bytes, 1);
2268                         if (head->is_data) {
2269                                 ret = btrfs_del_csums(trans, root,
2270                                                       node->bytenr,
2271                                                       node->num_bytes);
2272                         }
2273                 }
2274                 return ret;
2275         }
2276
2277         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2278             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2279                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2280                                            insert_reserved);
2281         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2282                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2283                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2284                                            insert_reserved);
2285         else
2286                 BUG();
2287         return ret;
2288 }
2289
2290 static noinline struct btrfs_delayed_ref_node *
2291 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2292 {
2293         struct rb_node *node;
2294         struct btrfs_delayed_ref_node *ref;
2295         int action = BTRFS_ADD_DELAYED_REF;
2296 again:
2297         /*
2298          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2299          * this prevents the ref count from going down to zero when
2300          * there are still pending delayed refs.
2301          */
2302         node = rb_prev(&head->node.rb_node);
2303         while (1) {
2304                 if (!node)
2305                         break;
2306                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2307                                 rb_node);
2308                 if (ref->bytenr != head->node.bytenr)
2309                         break;
2310                 if (ref->action == action)
2311                         return ref;
2312                 node = rb_prev(node);
2313         }
2314         if (action == BTRFS_ADD_DELAYED_REF) {
2315                 action = BTRFS_DROP_DELAYED_REF;
2316                 goto again;
2317         }
2318         return NULL;
2319 }
2320
2321 /*
2322  * Returns the number of refs processed (an already aborted transaction counts
2323  * as success), or a negative errno; on error the caller aborts the transaction.
2324  */
2325 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2326                                        struct btrfs_root *root,
2327                                        struct list_head *cluster)
2328 {
2329         struct btrfs_delayed_ref_root *delayed_refs;
2330         struct btrfs_delayed_ref_node *ref;
2331         struct btrfs_delayed_ref_head *locked_ref = NULL;
2332         struct btrfs_delayed_extent_op *extent_op;
2333         struct btrfs_fs_info *fs_info = root->fs_info;
2334         int ret;
2335         int count = 0;
2336         int must_insert_reserved = 0;
2337
2338         delayed_refs = &trans->transaction->delayed_refs;
2339         while (1) {
2340                 if (!locked_ref) {
2341                         /* pick a new head ref from the cluster list */
2342                         if (list_empty(cluster))
2343                                 break;
2344
2345                         locked_ref = list_entry(cluster->next,
2346                                      struct btrfs_delayed_ref_head, cluster);
2347
2348                         /* grab the lock that says we are going to process
2349                          * all the refs for this head */
2350                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2351
2352                         /*
2353                          * we may have dropped the spin lock to get the head
2354                          * mutex lock, and that might have given someone else
2355                          * time to free the head.  If that's true, it has been
2356                          * removed from our list and we can move on.
2357                          */
2358                         if (ret == -EAGAIN) {
2359                                 locked_ref = NULL;
2360                                 count++;
2361                                 continue;
2362                         }
2363                 }
2364
2365                 /*
2366                  * We need to try to merge add/drops of the same ref since we
2367                  * can run into issues with relocate dropping the implicit ref
2368                  * and then it being added back again before the drop can
2369                  * finish.  If we merged anything we need to re-loop so we can
2370                  * get a good ref.
2371                  */
2372                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2373                                          locked_ref);
2374
2375                 /*
2376                  * locked_ref is the head node, so we have to go one
2377                  * node back for any delayed ref updates
2378                  */
2379                 ref = select_delayed_ref(locked_ref);
2380
2381                 if (ref && ref->seq &&
2382                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2383                         /*
2384                          * there are still refs with lower seq numbers in the
2385                          * process of being added. Don't run this ref yet.
2386                          */
2387                         list_del_init(&locked_ref->cluster);
2388                         btrfs_delayed_ref_unlock(locked_ref);
2389                         locked_ref = NULL;
2390                         delayed_refs->num_heads_ready++;
2391                         spin_unlock(&delayed_refs->lock);
2392                         cond_resched();
2393                         spin_lock(&delayed_refs->lock);
2394                         continue;
2395                 }
2396
2397                 /*
2398                  * record the must insert reserved flag before we
2399                  * drop the spin lock.
2400                  */
2401                 must_insert_reserved = locked_ref->must_insert_reserved;
2402                 locked_ref->must_insert_reserved = 0;
2403
2404                 extent_op = locked_ref->extent_op;
2405                 locked_ref->extent_op = NULL;
2406
2407                 if (!ref) {
2408                         /* All delayed refs have been processed; go ahead
2409                          * and send the head node to run_one_delayed_ref,
2410                          * so that any accounting fixes can happen
2411                          */
2412                         ref = &locked_ref->node;
2413
2414                         if (extent_op && must_insert_reserved) {
2415                                 btrfs_free_delayed_extent_op(extent_op);
2416                                 extent_op = NULL;
2417                         }
2418
2419                         if (extent_op) {
2420                                 spin_unlock(&delayed_refs->lock);
2421
2422                                 ret = run_delayed_extent_op(trans, root,
2423                                                             ref, extent_op);
2424                                 btrfs_free_delayed_extent_op(extent_op);
2425
2426                                 if (ret) {
2427                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2428                                         spin_lock(&delayed_refs->lock);
2429                                         btrfs_delayed_ref_unlock(locked_ref);
2430                                         return ret;
2431                                 }
2432
2433                                 goto next;
2434                         }
2435                 }
2436
2437                 ref->in_tree = 0;
2438                 rb_erase(&ref->rb_node, &delayed_refs->root);
2439                 delayed_refs->num_entries--;
2440                 if (!btrfs_delayed_ref_is_head(ref)) {
2441                         /*
2442                          * when we play the delayed ref, also correct the
2443                          * ref_mod on head
2444                          */
2445                         switch (ref->action) {
2446                         case BTRFS_ADD_DELAYED_REF:
2447                         case BTRFS_ADD_DELAYED_EXTENT:
2448                                 locked_ref->node.ref_mod -= ref->ref_mod;
2449                                 break;
2450                         case BTRFS_DROP_DELAYED_REF:
2451                                 locked_ref->node.ref_mod += ref->ref_mod;
2452                                 break;
2453                         default:
2454                                 WARN_ON(1);
2455                         }
2456                 }
2457                 spin_unlock(&delayed_refs->lock);
2458
2459                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2460                                           must_insert_reserved);
2461
2462                 btrfs_free_delayed_extent_op(extent_op);
2463                 if (ret) {
2464                         btrfs_delayed_ref_unlock(locked_ref);
2465                         btrfs_put_delayed_ref(ref);
2466                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2467                         spin_lock(&delayed_refs->lock);
2468                         return ret;
2469                 }
2470
2471                 /*
2472                  * If this node is a head, that means all the refs in this head
2473                  * have been dealt with, and we will pick the next head to deal
2474                  * with, so we must unlock the head and drop it from the cluster
2475                  * list before we release it.
2476                  */
2477                 if (btrfs_delayed_ref_is_head(ref)) {
2478                         list_del_init(&locked_ref->cluster);
2479                         btrfs_delayed_ref_unlock(locked_ref);
2480                         locked_ref = NULL;
2481                 }
2482                 btrfs_put_delayed_ref(ref);
2483                 count++;
2484 next:
2485                 cond_resched();
2486                 spin_lock(&delayed_refs->lock);
2487         }
2488         return count;
2489 }
2490
2491 #ifdef SCRAMBLE_DELAYED_REFS
2492 /*
2493  * Normally delayed refs get processed in ascending bytenr order. This
2494  * correlates in most cases to the order added. To expose dependencies on this
2495  * order, we start to process the tree in the middle instead of the beginning
2496  */
2497 static u64 find_middle(struct rb_root *root)
2498 {
2499         struct rb_node *n = root->rb_node;
2500         struct btrfs_delayed_ref_node *entry;
2501         int alt = 1;
2502         u64 middle;
2503         u64 first = 0, last = 0;
2504
2505         n = rb_first(root);
2506         if (n) {
2507                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2508                 first = entry->bytenr;
2509         }
2510         n = rb_last(root);
2511         if (n) {
2512                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2513                 last = entry->bytenr;
2514         }
2515         n = root->rb_node;
2516
2517         while (n) {
2518                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2519                 WARN_ON(!entry->in_tree);
2520
2521                 middle = entry->bytenr;
2522
2523                 if (alt)
2524                         n = n->rb_left;
2525                 else
2526                         n = n->rb_right;
2527
2528                 alt = 1 - alt;
2529         }
2530         return middle;
2531 }
2532 #endif
2533
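     /*
      * flush the qgroup updates that were queued alongside the delayed
      * refs, then drop the tree mod log sequence number held for them
      */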
2534 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2535                                          struct btrfs_fs_info *fs_info)
2536 {
2537         struct qgroup_update *qgroup_update;
2538         int ret = 0;
2539
2540         if (list_empty(&trans->qgroup_ref_list) !=
2541             !trans->delayed_ref_elem.seq) {
2542                 /* list without seq or seq without list */
2543                 btrfs_err(fs_info,
2544                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2545                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2546                         (u32)(trans->delayed_ref_elem.seq >> 32),
2547                         (u32)trans->delayed_ref_elem.seq);
2548                 BUG();
2549         }
2550
2551         if (!trans->delayed_ref_elem.seq)
2552                 return 0;
2553
2554         while (!list_empty(&trans->qgroup_ref_list)) {
2555                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2556                                                  struct qgroup_update, list);
2557                 list_del(&qgroup_update->list);
2558                 if (!ret)
2559                         ret = btrfs_qgroup_account_ref(
2560                                         trans, fs_info, qgroup_update->node,
2561                                         qgroup_update->extent_op);
2562                 kfree(qgroup_update);
2563         }
2564
2565         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2566
2567         return ret;
2568 }
2569
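     /*
      * return 1 once ref_seq has moved outside [seq, seq + count), i.e.
      * at least count refs have been run since seq was sampled
      */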
2570 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2571                       int count)
2572 {
2573         int val = atomic_read(&delayed_refs->ref_seq);
2574
2575         if (val < seq || val >= seq + count)
2576                 return 1;
2577         return 0;
2578 }
2579
2580 /*
2581  * this starts processing the delayed reference count updates and extent
2582  * insertions we have queued up so far.  count can be 0 (process what was
2583  * queued at the start of the run, but not newly added entries),
2584  * (unsigned long)-1 (run until everything, including newly added
2585  * entries, has been processed), or a target number of refs to run.
2586  *
2587  * Returns 0 on success or if called with an aborted transaction.
2588  * Returns <0 on error and aborts the transaction.
2589  */
2590 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2591                            struct btrfs_root *root, unsigned long count)
2592 {
2593         struct rb_node *node;
2594         struct btrfs_delayed_ref_root *delayed_refs;
2595         struct btrfs_delayed_ref_node *ref;
2596         struct list_head cluster;
2597         int ret;
2598         u64 delayed_start;
2599         int run_all = count == (unsigned long)-1;
2600         int run_most = 0;
2601         int loops;
2602
2603         /* We'll clean this up in btrfs_cleanup_transaction */
2604         if (trans->aborted)
2605                 return 0;
2606
2607         if (root == root->fs_info->extent_root)
2608                 root = root->fs_info->tree_root;
2609
2610         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2611
2612         delayed_refs = &trans->transaction->delayed_refs;
2613         INIT_LIST_HEAD(&cluster);
2614         if (count == 0) {
2615                 count = delayed_refs->num_entries * 2;
2616                 run_most = 1;
2617         }
2618
2619         if (!run_all && !run_most) {
2620                 int old;
2621                 int seq = atomic_read(&delayed_refs->ref_seq);
2622
2623 progress:
2624                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2625                 if (old) {
2626                         DEFINE_WAIT(__wait);
2627                         if (delayed_refs->num_entries < 16384)
2628                                 return 0;
2629
2630                         prepare_to_wait(&delayed_refs->wait, &__wait,
2631                                         TASK_UNINTERRUPTIBLE);
2632
2633                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2634                         if (old) {
2635                                 schedule();
2636                                 finish_wait(&delayed_refs->wait, &__wait);
2637
2638                                 if (!refs_newer(delayed_refs, seq, 256))
2639                                         goto progress;
2640                                 else
2641                                         return 0;
2642                         } else {
2643                                 finish_wait(&delayed_refs->wait, &__wait);
2644                                 goto again;
2645                         }
2646                 }
2647
2648         } else {
2649                 atomic_inc(&delayed_refs->procs_running_refs);
2650         }
2651
2652 again:
2653         loops = 0;
2654         spin_lock(&delayed_refs->lock);
2655
2656 #ifdef SCRAMBLE_DELAYED_REFS
2657         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2658 #endif
2659
2660         while (1) {
2661                 if (!(run_all || run_most) &&
2662                     delayed_refs->num_heads_ready < 64)
2663                         break;
2664
2665                 /*
2666                  * go find something we can process in the rbtree.  We start at
2667                  * the beginning of the tree, and then build a cluster
2668                  * of refs to process starting at the first one we are able to
2669                  * lock
2670                  */
2671                 delayed_start = delayed_refs->run_delayed_start;
2672                 ret = btrfs_find_ref_cluster(trans, &cluster,
2673                                              delayed_refs->run_delayed_start);
2674                 if (ret)
2675                         break;
2676
2677                 ret = run_clustered_refs(trans, root, &cluster);
2678                 if (ret < 0) {
2679                         btrfs_release_ref_cluster(&cluster);
2680                         spin_unlock(&delayed_refs->lock);
2681                         btrfs_abort_transaction(trans, root, ret);
2682                         atomic_dec(&delayed_refs->procs_running_refs);
2683                         return ret;
2684                 }
2685
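                /*
                 * ret is the number of refs that run_clustered_refs()
                 * processed; bump ref_seq so that waiters in refs_newer()
                 * above can see the progress we are making.
                 */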
2686                 atomic_add(ret, &delayed_refs->ref_seq);
2687
2688                 count -= min_t(unsigned long, ret, count);
2689
2690                 if (count == 0)
2691                         break;
2692
2693                 if (delayed_start >= delayed_refs->run_delayed_start) {
2694                         if (loops == 0) {
                                /*
                                 * btrfs_find_ref_cluster looped back to the
                                 * start.  Do one more cycle; if we don't run
                                 * any delayed refs during that cycle (because
                                 * all of them are blocked), bail out.
                                 */
2701                                 loops = 1;
2702                         } else {
2703                                 /*
2704                                  * no runnable refs left, stop trying
2705                                  */
2706                                 BUG_ON(run_all);
2707                                 break;
2708                         }
2709                 }
2710                 if (ret) {
2711                         /* refs were run, let's reset staleness detection */
2712                         loops = 0;
2713                 }
2714         }
2715
2716         if (run_all) {
2717                 if (!list_empty(&trans->new_bgs)) {
2718                         spin_unlock(&delayed_refs->lock);
2719                         btrfs_create_pending_block_groups(trans, root);
2720                         spin_lock(&delayed_refs->lock);
2721                 }
2722
2723                 node = rb_first(&delayed_refs->root);
2724                 if (!node)
2725                         goto out;
2726                 count = (unsigned long)-1;
2727
2728                 while (node) {
2729                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2730                                        rb_node);
2731                         if (btrfs_delayed_ref_is_head(ref)) {
2732                                 struct btrfs_delayed_ref_head *head;
2733
2734                                 head = btrfs_delayed_node_to_head(ref);
2735                                 atomic_inc(&ref->refs);
2736
2737                                 spin_unlock(&delayed_refs->lock);
2738                                 /*
2739                                  * Mutex was contended, block until it's
2740                                  * released and try again
2741                                  */
2742                                 mutex_lock(&head->mutex);
2743                                 mutex_unlock(&head->mutex);
2744
2745                                 btrfs_put_delayed_ref(ref);
2746                                 cond_resched();
2747                                 goto again;
2748                         }
2749                         node = rb_next(node);
2750                 }
2751                 spin_unlock(&delayed_refs->lock);
2752                 schedule_timeout(1);
2753                 goto again;
2754         }
2755 out:
2756         atomic_dec(&delayed_refs->procs_running_refs);
2757         smp_mb();
2758         if (waitqueue_active(&delayed_refs->wait))
2759                 wake_up(&delayed_refs->wait);
2760
2761         spin_unlock(&delayed_refs->lock);
2762         assert_qgroups_uptodate(trans);
2763         return 0;
2764 }
2765
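/*
 * Queue a delayed extent op that only updates the flags of an extent
 * item; the extent tree itself is modified later, when the delayed refs
 * are run.
 */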
2766 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2767                                 struct btrfs_root *root,
2768                                 u64 bytenr, u64 num_bytes, u64 flags,
2769                                 int is_data)
2770 {
2771         struct btrfs_delayed_extent_op *extent_op;
2772         int ret;
2773
2774         extent_op = btrfs_alloc_delayed_extent_op();
2775         if (!extent_op)
2776                 return -ENOMEM;
2777
2778         extent_op->flags_to_set = flags;
2779         extent_op->update_flags = 1;
2780         extent_op->update_key = 0;
2781         extent_op->is_data = is_data ? 1 : 0;
2782
2783         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2784                                           num_bytes, extent_op);
2785         if (ret)
2786                 btrfs_free_delayed_extent_op(extent_op);
2787         return ret;
2788 }
2789
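/*
 * Look through the delayed refs queued against @bytenr and decide whether
 * any of them could belong to somebody other than (root, objectid,
 * offset).  Returns 0 if the only pending ref is our own, 1 if a foreign
 * or ambiguous ref exists, -ENOENT if there are no delayed refs for this
 * extent, and -EAGAIN if the head mutex was contended and the caller
 * should retry.
 */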
2790 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2791                                       struct btrfs_root *root,
2792                                       struct btrfs_path *path,
2793                                       u64 objectid, u64 offset, u64 bytenr)
2794 {
2795         struct btrfs_delayed_ref_head *head;
2796         struct btrfs_delayed_ref_node *ref;
2797         struct btrfs_delayed_data_ref *data_ref;
2798         struct btrfs_delayed_ref_root *delayed_refs;
2799         struct rb_node *node;
2800         int ret = 0;
2801
2802         ret = -ENOENT;
2803         delayed_refs = &trans->transaction->delayed_refs;
2804         spin_lock(&delayed_refs->lock);
2805         head = btrfs_find_delayed_ref_head(trans, bytenr);
2806         if (!head)
2807                 goto out;
2808
2809         if (!mutex_trylock(&head->mutex)) {
2810                 atomic_inc(&head->node.refs);
2811                 spin_unlock(&delayed_refs->lock);
2812
2813                 btrfs_release_path(path);
2814
2815                 /*
2816                  * Mutex was contended, block until it's released and let
2817                  * caller try again
2818                  */
2819                 mutex_lock(&head->mutex);
2820                 mutex_unlock(&head->mutex);
2821                 btrfs_put_delayed_ref(&head->node);
2822                 return -EAGAIN;
2823         }
2824
2825         node = rb_prev(&head->node.rb_node);
2826         if (!node)
2827                 goto out_unlock;
2828
2829         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2830
2831         if (ref->bytenr != bytenr)
2832                 goto out_unlock;
2833
2834         ret = 1;
2835         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2836                 goto out_unlock;
2837
2838         data_ref = btrfs_delayed_node_to_data_ref(ref);
2839
2840         node = rb_prev(node);
2841         if (node) {
2842                 int seq = ref->seq;
2843
2844                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2845                 if (ref->bytenr == bytenr && ref->seq == seq)
2846                         goto out_unlock;
2847         }
2848
2849         if (data_ref->root != root->root_key.objectid ||
2850             data_ref->objectid != objectid || data_ref->offset != offset)
2851                 goto out_unlock;
2852
2853         ret = 0;
2854 out_unlock:
2855         mutex_unlock(&head->mutex);
2856 out:
2857         spin_unlock(&delayed_refs->lock);
2858         return ret;
2859 }
2860
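/*
 * Check the committed extent tree for cross references to this data
 * extent.  Returns 0 only if the extent item carries a single inline data
 * ref matching (root, objectid, offset) and the extent is newer than the
 * root's last snapshot; otherwise returns 1 (a cross reference may exist)
 * or -ENOENT if the extent item can't be found.
 */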
2861 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2862                                         struct btrfs_root *root,
2863                                         struct btrfs_path *path,
2864                                         u64 objectid, u64 offset, u64 bytenr)
2865 {
2866         struct btrfs_root *extent_root = root->fs_info->extent_root;
2867         struct extent_buffer *leaf;
2868         struct btrfs_extent_data_ref *ref;
2869         struct btrfs_extent_inline_ref *iref;
2870         struct btrfs_extent_item *ei;
2871         struct btrfs_key key;
2872         u32 item_size;
2873         int ret;
2874
2875         key.objectid = bytenr;
2876         key.offset = (u64)-1;
2877         key.type = BTRFS_EXTENT_ITEM_KEY;
2878
2879         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2880         if (ret < 0)
2881                 goto out;
2882         BUG_ON(ret == 0); /* Corruption */
2883
2884         ret = -ENOENT;
2885         if (path->slots[0] == 0)
2886                 goto out;
2887
2888         path->slots[0]--;
2889         leaf = path->nodes[0];
2890         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2891
2892         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2893                 goto out;
2894
2895         ret = 1;
2896         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2897 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2898         if (item_size < sizeof(*ei)) {
2899                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2900                 goto out;
2901         }
2902 #endif
2903         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2904
2905         if (item_size != sizeof(*ei) +
2906             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2907                 goto out;
2908
2909         if (btrfs_extent_generation(leaf, ei) <=
2910             btrfs_root_last_snapshot(&root->root_item))
2911                 goto out;
2912
2913         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2914         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2915             BTRFS_EXTENT_DATA_REF_KEY)
2916                 goto out;
2917
2918         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2919         if (btrfs_extent_refs(leaf, ei) !=
2920             btrfs_extent_data_ref_count(leaf, ref) ||
2921             btrfs_extent_data_ref_root(leaf, ref) !=
2922             root->root_key.objectid ||
2923             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2924             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2925                 goto out;
2926
2927         ret = 0;
2928 out:
2929         return ret;
2930 }
2931
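/*
 * Returns 0 if we are certain no other root references the given data
 * extent, and non-zero otherwise.  Both the committed extent tree and the
 * delayed refs have to agree; -EAGAIN from check_delayed_ref() just means
 * the ref head was contended, so we loop and try again.
 */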
2932 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2933                           struct btrfs_root *root,
2934                           u64 objectid, u64 offset, u64 bytenr)
2935 {
2936         struct btrfs_path *path;
2937         int ret;
2938         int ret2;
2939
2940         path = btrfs_alloc_path();
2941         if (!path)
2942                 return -ENOENT;
2943
2944         do {
2945                 ret = check_committed_ref(trans, root, path, objectid,
2946                                           offset, bytenr);
2947                 if (ret && ret != -ENOENT)
2948                         goto out;
2949
2950                 ret2 = check_delayed_ref(trans, root, path, objectid,
2951                                          offset, bytenr);
2952         } while (ret2 == -EAGAIN);
2953
2954         if (ret2 && ret2 != -ENOENT) {
2955                 ret = ret2;
2956                 goto out;
2957         }
2958
2959         if (ret != -ENOENT || ret2 != -ENOENT)
2960                 ret = 0;
2961 out:
2962         btrfs_free_path(path);
2963         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2964                 WARN_ON(ret > 0);
2965         return ret;
2966 }
2967
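/*
 * Walk every pointer in @buf and add (inc=1) or drop (inc=0) one
 * reference for each extent it points to: file extent refs keyed by
 * (root, inode, offset) for leaves, and tree block refs for the children
 * of nodes.
 */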
2968 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2969                            struct btrfs_root *root,
2970                            struct extent_buffer *buf,
2971                            int full_backref, int inc, int for_cow)
2972 {
2973         u64 bytenr;
2974         u64 num_bytes;
2975         u64 parent;
2976         u64 ref_root;
2977         u32 nritems;
2978         struct btrfs_key key;
2979         struct btrfs_file_extent_item *fi;
2980         int i;
2981         int level;
2982         int ret = 0;
2983         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2984                             u64, u64, u64, u64, u64, u64, int);
2985
2986         ref_root = btrfs_header_owner(buf);
2987         nritems = btrfs_header_nritems(buf);
2988         level = btrfs_header_level(buf);
2989
2990         if (!root->ref_cows && level == 0)
2991                 return 0;
2992
2993         if (inc)
2994                 process_func = btrfs_inc_extent_ref;
2995         else
2996                 process_func = btrfs_free_extent;
2997
2998         if (full_backref)
2999                 parent = buf->start;
3000         else
3001                 parent = 0;
3002
3003         for (i = 0; i < nritems; i++) {
3004                 if (level == 0) {
3005                         btrfs_item_key_to_cpu(buf, &key, i);
3006                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3007                                 continue;
3008                         fi = btrfs_item_ptr(buf, i,
3009                                             struct btrfs_file_extent_item);
3010                         if (btrfs_file_extent_type(buf, fi) ==
3011                             BTRFS_FILE_EXTENT_INLINE)
3012                                 continue;
3013                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3014                         if (bytenr == 0)
3015                                 continue;
3016
3017                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3018                         key.offset -= btrfs_file_extent_offset(buf, fi);
3019                         ret = process_func(trans, root, bytenr, num_bytes,
3020                                            parent, ref_root, key.objectid,
3021                                            key.offset, for_cow);
3022                         if (ret)
3023                                 goto fail;
3024                 } else {
3025                         bytenr = btrfs_node_blockptr(buf, i);
3026                         num_bytes = btrfs_level_size(root, level - 1);
3027                         ret = process_func(trans, root, bytenr, num_bytes,
3028                                            parent, ref_root, level - 1, 0,
3029                                            for_cow);
3030                         if (ret)
3031                                 goto fail;
3032                 }
3033         }
3034         return 0;
3035 fail:
3036         return ret;
3037 }
3038
3039 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3040                   struct extent_buffer *buf, int full_backref, int for_cow)
3041 {
3042         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3043 }
3044
3045 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3046                   struct extent_buffer *buf, int full_backref, int for_cow)
3047 {
3048         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3049 }
3050
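/*
 * Write the updated block group item for @cache back into the extent
 * tree; the item must already exist there.
 */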
3051 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3052                                  struct btrfs_root *root,
3053                                  struct btrfs_path *path,
3054                                  struct btrfs_block_group_cache *cache)
3055 {
3056         int ret;
3057         struct btrfs_root *extent_root = root->fs_info->extent_root;
3058         unsigned long bi;
3059         struct extent_buffer *leaf;
3060
3061         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3062         if (ret < 0)
3063                 goto fail;
3064         BUG_ON(ret); /* Corruption */
3065
3066         leaf = path->nodes[0];
3067         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3068         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3069         btrfs_mark_buffer_dirty(leaf);
3070         btrfs_release_path(path);
3071 fail:
3072         if (ret) {
3073                 btrfs_abort_transaction(trans, root, ret);
3074                 return ret;
3075         }
        return 0;
}
3079
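/*
 * Return the block group that follows @cache in bytenr order, trading the
 * caller's reference on @cache for a reference on the returned group, or
 * NULL at the end of the tree.
 */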
3080 static struct btrfs_block_group_cache *
3081 next_block_group(struct btrfs_root *root,
3082                  struct btrfs_block_group_cache *cache)
3083 {
        struct rb_node *node;

        spin_lock(&root->fs_info->block_group_cache_lock);
3086         node = rb_next(&cache->cache_node);
3087         btrfs_put_block_group(cache);
3088         if (node) {
3089                 cache = rb_entry(node, struct btrfs_block_group_cache,
3090                                  cache_node);
3091                 btrfs_get_block_group(cache);
        } else {
                cache = NULL;
        }
3094         spin_unlock(&root->fs_info->block_group_cache_lock);
3095         return cache;
3096 }
3097
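/*
 * Prepare the free space cache inode for this block group so the cache
 * can be written out at commit time: create the inode if it doesn't exist
 * yet, truncate any stale contents and preallocate room for the new
 * cache.
 */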
3098 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3099                             struct btrfs_trans_handle *trans,
3100                             struct btrfs_path *path)
3101 {
3102         struct btrfs_root *root = block_group->fs_info->tree_root;
3103         struct inode *inode = NULL;
3104         u64 alloc_hint = 0;
3105         int dcs = BTRFS_DC_ERROR;
3106         int num_pages = 0;
3107         int retries = 0;
3108         int ret = 0;
3109
        /*
         * If this block group is smaller than 100 megs, don't bother caching
         * it.
         */
3114         if (block_group->key.offset < (100 * 1024 * 1024)) {
3115                 spin_lock(&block_group->lock);
3116                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3117                 spin_unlock(&block_group->lock);
3118                 return 0;
3119         }
3120
3121 again:
3122         inode = lookup_free_space_inode(root, block_group, path);
3123         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3124                 ret = PTR_ERR(inode);
3125                 btrfs_release_path(path);
3126                 goto out;
3127         }
3128
3129         if (IS_ERR(inode)) {
3130                 BUG_ON(retries);
3131                 retries++;
3132
3133                 if (block_group->ro)
3134                         goto out_free;
3135
3136                 ret = create_free_space_inode(root, trans, block_group, path);
3137                 if (ret)
3138                         goto out_free;
3139                 goto again;
3140         }
3141
        /* We've already set up this transaction, go ahead and exit */
3143         if (block_group->cache_generation == trans->transid &&
3144             i_size_read(inode)) {
3145                 dcs = BTRFS_DC_SETUP;
3146                 goto out_put;
3147         }
3148
        /*
         * We want to set the generation to 0 so that if anything goes wrong
         * from here on out we know not to trust this cache the next time we
         * load it.
         */
3154         BTRFS_I(inode)->generation = 0;
3155         ret = btrfs_update_inode(trans, root, inode);
3156         WARN_ON(ret);
3157
3158         if (i_size_read(inode) > 0) {
3159                 ret = btrfs_truncate_free_space_cache(root, trans, path,
3160                                                       inode);
3161                 if (ret)
3162                         goto out_put;
3163         }
3164
3165         spin_lock(&block_group->lock);
3166         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3167             !btrfs_test_opt(root, SPACE_CACHE)) {
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached, or
                 * b) we're mounted with the nospace_cache option.
                 */
3173                 dcs = BTRFS_DC_WRITTEN;
3174                 spin_unlock(&block_group->lock);
3175                 goto out_put;
3176         }
3177         spin_unlock(&block_group->lock);
3178
3179         /*
3180          * Try to preallocate enough space based on how big the block group is.
3181          * Keep in mind this has to include any pinned space which could end up
3182          * taking up quite a bit since it's not folded into the other space
3183          * cache.
3184          */
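        /*
         * Note that num_pages holds a byte count once the PAGE_CACHE_SIZE
         * multiply below is done; e.g. a 1GiB block group gets 4 * 16 = 64
         * pages, i.e. 256KiB of cache space with 4KiB pages.
         */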
3185         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3186         if (!num_pages)
3187                 num_pages = 1;
3188
3189         num_pages *= 16;
3190         num_pages *= PAGE_CACHE_SIZE;
3191
3192         ret = btrfs_check_data_free_space(inode, num_pages);
3193         if (ret)
3194                 goto out_put;
3195
3196         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3197                                               num_pages, num_pages,
3198                                               &alloc_hint);
3199         if (!ret)
3200                 dcs = BTRFS_DC_SETUP;
3201         btrfs_free_reserved_data_space(inode, num_pages);
3202
3203 out_put:
3204         iput(inode);
3205 out_free:
3206         btrfs_release_path(path);
3207 out:
3208         spin_lock(&block_group->lock);
3209         if (!ret && dcs == BTRFS_DC_SETUP)
3210                 block_group->cache_generation = trans->transid;
3211         block_group->disk_cache_state = dcs;
3212         spin_unlock(&block_group->lock);
3213
3214         return ret;
3215 }
3216
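/*
 * Called during transaction commit.  Loops over the block groups three
 * times: first setting up free space cache inodes for the BTRFS_DC_CLEAR
 * groups, then writing out the dirty block group items, and finally
 * writing the space caches that were marked BTRFS_DC_NEED_WRITE.
 */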
3217 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3218                                    struct btrfs_root *root)
3219 {
3220         struct btrfs_block_group_cache *cache;
3221         int err = 0;
3222         struct btrfs_path *path;
3223         u64 last = 0;
3224
3225         path = btrfs_alloc_path();
3226         if (!path)
3227                 return -ENOMEM;
3228
3229 again:
3230         while (1) {
3231                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3232                 while (cache) {
3233                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3234                                 break;
3235                         cache = next_block_group(root, cache);
3236                 }
3237                 if (!cache) {
3238                         if (last == 0)
3239                                 break;
3240                         last = 0;
3241                         continue;
3242                 }
3243                 err = cache_save_setup(cache, trans, path);
3244                 last = cache->key.objectid + cache->key.offset;
3245                 btrfs_put_block_group(cache);
3246         }
3247
3248         while (1) {
3249                 if (last == 0) {
3250                         err = btrfs_run_delayed_refs(trans, root,
3251                                                      (unsigned long)-1);
3252                         if (err) /* File system offline */
3253                                 goto out;
3254                 }
3255
3256                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3257                 while (cache) {
3258                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3259                                 btrfs_put_block_group(cache);
3260                                 goto again;
3261                         }
3262
3263                         if (cache->dirty)
3264                                 break;
3265                         cache = next_block_group(root, cache);
3266                 }
3267                 if (!cache) {
3268                         if (last == 0)
3269                                 break;
3270                         last = 0;
3271                         continue;
3272                 }
3273
3274                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3275                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3276                 cache->dirty = 0;
3277                 last = cache->key.objectid + cache->key.offset;
3278
3279                 err = write_one_cache_group(trans, root, path, cache);
3280                 if (err) /* File system offline */
3281                         goto out;
3282
3283                 btrfs_put_block_group(cache);
3284         }
3285
3286         while (1) {
                /*
                 * I don't think this is needed since we're just marking our
                 * preallocated extent as written, but it can't hurt just in
                 * case.
                 */
3292                 if (last == 0) {
3293                         err = btrfs_run_delayed_refs(trans, root,
3294                                                      (unsigned long)-1);
3295                         if (err) /* File system offline */
3296                                 goto out;
3297                 }
3298
3299                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3300                 while (cache) {
3301                         /*
3302                          * Really this shouldn't happen, but it could if we
3303                          * couldn't write the entire preallocated extent and
3304                          * splitting the extent resulted in a new block.
3305                          */
3306                         if (cache->dirty) {
3307                                 btrfs_put_block_group(cache);
3308                                 goto again;
3309                         }
3310                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3311                                 break;
3312                         cache = next_block_group(root, cache);
3313                 }
3314                 if (!cache) {
3315                         if (last == 0)
3316                                 break;
3317                         last = 0;
3318                         continue;
3319                 }
3320
3321                 err = btrfs_write_out_cache(root, trans, cache, path);
3322
3323                 /*
3324                  * If we didn't have an error then the cache state is still
3325                  * NEED_WRITE, so we can set it to WRITTEN.
3326                  */
3327                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3328                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3329                 last = cache->key.objectid + cache->key.offset;
3330                 btrfs_put_block_group(cache);
3331         }
out:
        btrfs_free_path(path);
3335         return err;
3336 }
3337
3338 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3339 {
3340         struct btrfs_block_group_cache *block_group;
3341         int readonly = 0;
3342
3343         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3344         if (!block_group || block_group->ro)
3345                 readonly = 1;
3346         if (block_group)
3347                 btrfs_put_block_group(block_group);
3348         return readonly;
3349 }
3350
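/*
 * Fold @total_bytes/@bytes_used into the space_info matching @flags,
 * allocating and registering a new space_info if this is the first block
 * group of that type.  @factor doubles the on-disk accounting for
 * profiles (DUP/RAID1/RAID10) that store two copies of every byte.
 */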
3351 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3352                              u64 total_bytes, u64 bytes_used,
3353                              struct btrfs_space_info **space_info)
3354 {
3355         struct btrfs_space_info *found;
3356         int i;
3357         int factor;
3358
3359         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3360                      BTRFS_BLOCK_GROUP_RAID10))
3361                 factor = 2;
3362         else
3363                 factor = 1;
3364
3365         found = __find_space_info(info, flags);
3366         if (found) {
3367                 spin_lock(&found->lock);
3368                 found->total_bytes += total_bytes;
3369                 found->disk_total += total_bytes * factor;
3370                 found->bytes_used += bytes_used;
3371                 found->disk_used += bytes_used * factor;
3372                 found->full = 0;
3373                 spin_unlock(&found->lock);
3374                 *space_info = found;
3375                 return 0;
3376         }
3377         found = kzalloc(sizeof(*found), GFP_NOFS);
3378         if (!found)
3379                 return -ENOMEM;
3380
3381         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3382                 INIT_LIST_HEAD(&found->block_groups[i]);
3383         init_rwsem(&found->groups_sem);
3384         spin_lock_init(&found->lock);
3385         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3386         found->total_bytes = total_bytes;
3387         found->disk_total = total_bytes * factor;
3388         found->bytes_used = bytes_used;
3389         found->disk_used = bytes_used * factor;
3390         found->bytes_pinned = 0;
3391         found->bytes_reserved = 0;
3392         found->bytes_readonly = 0;
3393         found->bytes_may_use = 0;
3394         found->full = 0;
3395         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3396         found->chunk_alloc = 0;
3397         found->flush = 0;
3398         init_waitqueue_head(&found->wait);
3399         *space_info = found;
3400         list_add_rcu(&found->list, &info->space_info);
3401         if (flags & BTRFS_BLOCK_GROUP_DATA)
3402                 info->data_sinfo = found;
3403         return 0;
3404 }
3405
3406 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3407 {
3408         u64 extra_flags = chunk_to_extended(flags) &
3409                                 BTRFS_EXTENDED_PROFILE_MASK;
3410
3411         write_seqlock(&fs_info->profiles_lock);
3412         if (flags & BTRFS_BLOCK_GROUP_DATA)
3413                 fs_info->avail_data_alloc_bits |= extra_flags;
3414         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3415                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3416         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3417                 fs_info->avail_system_alloc_bits |= extra_flags;
3418         write_sequnlock(&fs_info->profiles_lock);
3419 }
3420
3421 /*
3422  * returns target flags in extended format or 0 if restripe for this
3423  * chunk_type is not in progress
3424  *
3425  * should be called with either volume_mutex or balance_lock held
3426  */
3427 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3428 {
3429         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3430         u64 target = 0;
3431
3432         if (!bctl)
3433                 return 0;
3434
3435         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3436             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3437                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3438         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3439                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3440                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3441         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3442                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3443                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3444         }
3445
3446         return target;
3447 }
3448
3449 /*
3450  * @flags: available profiles in extended format (see ctree.h)
3451  *
3452  * Returns reduced profile in chunk format.  If profile changing is in
3453  * progress (either running or paused) picks the target profile (if it's
3454  * already available), otherwise falls back to plain reducing.
3455  */
3456 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3457 {
3458         /*
3459          * we add in the count of missing devices because we want
3460          * to make sure that any RAID levels on a degraded FS
3461          * continue to be honored.
3462          */
3463         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3464                 root->fs_info->fs_devices->missing_devices;
3465         u64 target;
3466         u64 tmp;
3467
3468         /*
3469          * see if restripe for this chunk_type is in progress, if so
3470          * try to reduce to the target profile
3471          */
3472         spin_lock(&root->fs_info->balance_lock);
3473         target = get_restripe_target(root->fs_info, flags);
3474         if (target) {
3475                 /* pick target profile only if it's already available */
3476                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3477                         spin_unlock(&root->fs_info->balance_lock);
3478                         return extended_to_chunk(target);
3479                 }
3480         }
3481         spin_unlock(&root->fs_info->balance_lock);
3482
3483         /* First, mask out the RAID levels which aren't possible */
3484         if (num_devices == 1)
3485                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3486                            BTRFS_BLOCK_GROUP_RAID5);
3487         if (num_devices < 3)
3488                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3489         if (num_devices < 4)
3490                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3491
3492         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3493                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3494                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3495         flags &= ~tmp;
3496
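        /* keep a single profile, preferring the more redundant ones first */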
3497         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3498                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3499         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3500                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3501         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3502                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3503         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3504                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3505         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3506                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3507
3508         return extended_to_chunk(flags | tmp);
3509 }
3510
3511 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3512 {
3513         unsigned seq;
3514
3515         do {
3516                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3517
3518                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3519                         flags |= root->fs_info->avail_data_alloc_bits;
3520                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3521                         flags |= root->fs_info->avail_system_alloc_bits;
3522                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3523                         flags |= root->fs_info->avail_metadata_alloc_bits;
3524         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3525
3526         return btrfs_reduce_alloc_profile(root, flags);
3527 }
3528
3529 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3530 {
3531         u64 flags;
3532         u64 ret;
3533
3534         if (data)
3535                 flags = BTRFS_BLOCK_GROUP_DATA;
3536         else if (root == root->fs_info->chunk_root)
3537                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3538         else
3539                 flags = BTRFS_BLOCK_GROUP_METADATA;
3540
3541         ret = get_alloc_profile(root, flags);
3542         return ret;
3543 }
3544
3545 /*
3546  * This will check the space that the inode allocates from to make sure we have
3547  * enough space for bytes.
3548  */
3549 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3550 {
3551         struct btrfs_space_info *data_sinfo;
3552         struct btrfs_root *root = BTRFS_I(inode)->root;
3553         struct btrfs_fs_info *fs_info = root->fs_info;
3554         u64 used;
3555         int ret = 0, committed = 0, alloc_chunk = 1;
3556
3557         /* make sure bytes are sectorsize aligned */
3558         bytes = ALIGN(bytes, root->sectorsize);
3559
3560         if (root == root->fs_info->tree_root ||
3561             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3562                 alloc_chunk = 0;
3563                 committed = 1;
3564         }
3565
3566         data_sinfo = fs_info->data_sinfo;
3567         if (!data_sinfo)
3568                 goto alloc;
3569
3570 again:
3571         /* make sure we have enough space to handle the data first */
3572         spin_lock(&data_sinfo->lock);
3573         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3574                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3575                 data_sinfo->bytes_may_use;
3576
3577         if (used + bytes > data_sinfo->total_bytes) {
3578                 struct btrfs_trans_handle *trans;
3579
3580                 /*
3581                  * if we don't have enough free bytes in this space then we need
3582                  * to alloc a new chunk.
3583                  */
3584                 if (!data_sinfo->full && alloc_chunk) {
3585                         u64 alloc_target;
3586
3587                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3588                         spin_unlock(&data_sinfo->lock);
3589 alloc:
3590                         alloc_target = btrfs_get_alloc_profile(root, 1);
3591                         trans = btrfs_join_transaction(root);
3592                         if (IS_ERR(trans))
3593                                 return PTR_ERR(trans);
3594
3595                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3596                                              alloc_target,
3597                                              CHUNK_ALLOC_NO_FORCE);
3598                         btrfs_end_transaction(trans, root);
3599                         if (ret < 0) {
3600                                 if (ret != -ENOSPC)
3601                                         return ret;
3602                                 else
3603                                         goto commit_trans;
3604                         }
3605
3606                         if (!data_sinfo)
3607                                 data_sinfo = fs_info->data_sinfo;
3608
3609                         goto again;
3610                 }
3611
3612                 /*
3613                  * If we have less pinned bytes than we want to allocate then
3614                  * don't bother committing the transaction, it won't help us.
3615                  */
3616                 if (data_sinfo->bytes_pinned < bytes)
3617                         committed = 1;
3618                 spin_unlock(&data_sinfo->lock);
3619
3620                 /* commit the current transaction and try again */
3621 commit_trans:
3622                 if (!committed &&
3623                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3624                         committed = 1;
3625                         trans = btrfs_join_transaction(root);
3626                         if (IS_ERR(trans))
3627                                 return PTR_ERR(trans);
3628                         ret = btrfs_commit_transaction(trans, root);
3629                         if (ret)
3630                                 return ret;
3631                         goto again;
3632                 }
3633
3634                 return -ENOSPC;
3635         }
3636         data_sinfo->bytes_may_use += bytes;
3637         trace_btrfs_space_reservation(root->fs_info, "space_info",
3638                                       data_sinfo->flags, bytes, 1);
3639         spin_unlock(&data_sinfo->lock);
3640
3641         return 0;
3642 }
3643
3644 /*
3645  * Called if we need to clear a data reservation for this inode.
3646  */
3647 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3648 {
3649         struct btrfs_root *root = BTRFS_I(inode)->root;
3650         struct btrfs_space_info *data_sinfo;
3651
3652         /* make sure bytes are sectorsize aligned */
3653         bytes = ALIGN(bytes, root->sectorsize);
3654
3655         data_sinfo = root->fs_info->data_sinfo;
3656         spin_lock(&data_sinfo->lock);
3657         data_sinfo->bytes_may_use -= bytes;
3658         trace_btrfs_space_reservation(root->fs_info, "space_info",
3659                                       data_sinfo->flags, bytes, 0);
3660         spin_unlock(&data_sinfo->lock);
3661 }
3662
3663 static void force_metadata_allocation(struct btrfs_fs_info *info)
3664 {
3665         struct list_head *head = &info->space_info;
3666         struct btrfs_space_info *found;
3667
3668         rcu_read_lock();
3669         list_for_each_entry_rcu(found, head, list) {
3670                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3671                         found->force_alloc = CHUNK_ALLOC_FORCE;
3672         }
3673         rcu_read_unlock();
3674 }
3675
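/*
 * The amount of space the callers below leave for the global block
 * reserve: twice its current size, as headroom.
 */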
3676 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3677 {
3678         return (global->size << 1);
3679 }
3680
3681 static int should_alloc_chunk(struct btrfs_root *root,
3682                               struct btrfs_space_info *sinfo, int force)
3683 {
3684         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3685         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3686         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3687         u64 thresh;
3688
3689         if (force == CHUNK_ALLOC_FORCE)
3690                 return 1;
3691
3692         /*
3693          * We need to take into account the global rsv because for all intents
3694          * and purposes it's used space.  Don't worry about locking the
3695          * global_rsv, it doesn't change except when the transaction commits.
3696          */
3697         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3698                 num_allocated += calc_global_rsv_need_space(global_rsv);
3699
3700         /*
3701          * in limited mode, we want to have some free space up to
3702          * about 1% of the FS size.
3703          */
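        /* i.e. thresh = max(64MiB, 1% of the total FS size) */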
3704         if (force == CHUNK_ALLOC_LIMITED) {
3705                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3706                 thresh = max_t(u64, 64 * 1024 * 1024,
3707                                div_factor_fine(thresh, 1));
3708
3709                 if (num_bytes - num_allocated < thresh)
3710                         return 1;
3711         }
3712
3713         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3714                 return 0;
3715         return 1;
3716 }
3717
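/*
 * Worst-case metadata we need free in the SYSTEM space to allocate a
 * chunk of the given type: one item per device that may be updated plus
 * one for the chunk tree insertion itself.
 */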
3718 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3719 {
3720         u64 num_dev;
3721
3722         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3723                     BTRFS_BLOCK_GROUP_RAID0 |
3724                     BTRFS_BLOCK_GROUP_RAID5 |
3725                     BTRFS_BLOCK_GROUP_RAID6))
3726                 num_dev = root->fs_info->fs_devices->rw_devices;
3727         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3728                 num_dev = 2;
3729         else
3730                 num_dev = 1;    /* DUP or single */
3731
        /* metadata for updating devices and the chunk tree */
3733         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3734 }
3735
3736 static void check_system_chunk(struct btrfs_trans_handle *trans,
3737                                struct btrfs_root *root, u64 type)
3738 {
3739         struct btrfs_space_info *info;
3740         u64 left;
3741         u64 thresh;
3742
3743         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3744         spin_lock(&info->lock);
3745         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3746                 info->bytes_reserved - info->bytes_readonly;
3747         spin_unlock(&info->lock);
3748
3749         thresh = get_system_chunk_thresh(root, type);
3750         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3751                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3752                         left, thresh, type);
3753                 dump_space_info(info, 0, 0);
3754         }
3755
3756         if (left < thresh) {
3757                 u64 flags;
3758
3759                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3760                 btrfs_alloc_chunk(trans, root, flags);
3761         }
3762 }
3763
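/*
 * Allocate a new chunk of @flags if should_alloc_chunk() agrees we need
 * one.  Only one allocation per space_info runs at a time; racing callers
 * wait on the chunk_mutex and then re-evaluate.  Returns 1 if a chunk was
 * allocated, 0 if none was needed, and a negative errno (including
 * -ENOSPC when the devices are full) otherwise.
 */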
3764 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3765                           struct btrfs_root *extent_root, u64 flags, int force)
3766 {
3767         struct btrfs_space_info *space_info;
3768         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3769         int wait_for_alloc = 0;
3770         int ret = 0;
3771
3772         /* Don't re-enter if we're already allocating a chunk */
3773         if (trans->allocating_chunk)
3774                 return -ENOSPC;
3775
3776         space_info = __find_space_info(extent_root->fs_info, flags);
3777         if (!space_info) {
3778                 ret = update_space_info(extent_root->fs_info, flags,
3779                                         0, 0, &space_info);
3780                 BUG_ON(ret); /* -ENOMEM */
3781         }
3782         BUG_ON(!space_info); /* Logic error */
3783
3784 again:
3785         spin_lock(&space_info->lock);
3786         if (force < space_info->force_alloc)
3787                 force = space_info->force_alloc;
3788         if (space_info->full) {
3789                 spin_unlock(&space_info->lock);
3790                 return 0;
3791         }
3792
3793         if (!should_alloc_chunk(extent_root, space_info, force)) {
3794                 spin_unlock(&space_info->lock);
3795                 return 0;
3796         } else if (space_info->chunk_alloc) {
3797                 wait_for_alloc = 1;
3798         } else {
3799                 space_info->chunk_alloc = 1;
3800         }
3801
3802         spin_unlock(&space_info->lock);
3803
3804         mutex_lock(&fs_info->chunk_mutex);
3805
3806         /*
3807          * The chunk_mutex is held throughout the entirety of a chunk
3808          * allocation, so once we've acquired the chunk_mutex we know that the
3809          * other guy is done and we need to recheck and see if we should
3810          * allocate.
3811          */
3812         if (wait_for_alloc) {
3813                 mutex_unlock(&fs_info->chunk_mutex);
3814                 wait_for_alloc = 0;
3815                 goto again;
3816         }
3817
3818         trans->allocating_chunk = true;
3819
3820         /*
3821          * If we have mixed data/metadata chunks we want to make sure we keep
3822          * allocating mixed chunks instead of individual chunks.
3823          */
3824         if (btrfs_mixed_space_info(space_info))
3825                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3826
3827         /*
3828          * if we're doing a data chunk, go ahead and make sure that
3829          * we keep a reasonable number of metadata chunks allocated in the
3830          * FS as well.
3831          */
3832         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3833                 fs_info->data_chunk_allocations++;
3834                 if (!(fs_info->data_chunk_allocations %
3835                       fs_info->metadata_ratio))
3836                         force_metadata_allocation(fs_info);
3837         }
3838
3839         /*
3840          * Check if we have enough space in SYSTEM chunk because we may need
3841          * to update devices.
3842          */
3843         check_system_chunk(trans, extent_root, flags);
3844
3845         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3846         trans->allocating_chunk = false;
3847
3848         spin_lock(&space_info->lock);
3849         if (ret < 0 && ret != -ENOSPC)
3850                 goto out;
3851         if (ret)
3852                 space_info->full = 1;
3853         else
3854                 ret = 1;
3855
3856         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3857 out:
3858         space_info->chunk_alloc = 0;
3859         spin_unlock(&space_info->lock);
3860         mutex_unlock(&fs_info->chunk_mutex);
3861         return ret;
3862 }
3863
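/*
 * Decide whether reserving @bytes may push us past total_bytes for this
 * space_info.  Overcommit is allowed up to 1/2 of the space when we can't
 * flush and up to 1/8 when we can, and is always capped by the
 * unallocated device space that could still become chunks.
 */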
3864 static int can_overcommit(struct btrfs_root *root,
3865                           struct btrfs_space_info *space_info, u64 bytes,
3866                           enum btrfs_reserve_flush_enum flush)
3867 {
3868         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3869         u64 profile = btrfs_get_alloc_profile(root, 0);
3870         u64 space_size;
3871         u64 avail;
3872         u64 used;
3873         u64 to_add;
3874
3875         used = space_info->bytes_used + space_info->bytes_reserved +
3876                 space_info->bytes_pinned + space_info->bytes_readonly;
3877
3878         /*
3879          * We only want to allow over committing if we have lots of actual space
3880          * free, but if we don't have enough space to handle the global reserve
3881          * space then we could end up having a real enospc problem when trying
3882          * to allocate a chunk or some other such important allocation.
3883          */
3884         spin_lock(&global_rsv->lock);
3885         space_size = calc_global_rsv_need_space(global_rsv);
3886         spin_unlock(&global_rsv->lock);
3887         if (used + space_size >= space_info->total_bytes)
3888                 return 0;
3889
3890         used += space_info->bytes_may_use;
3891
3892         spin_lock(&root->fs_info->free_chunk_lock);
3893         avail = root->fs_info->free_chunk_space;
3894         spin_unlock(&root->fs_info->free_chunk_lock);
3895
        /*
         * If we have dup, raid1 or raid10 then only half of the free
         * space is actually usable.  For raid56, the space info used
         * doesn't include the parity drive, so we don't have to
         * change the math.
         */
3902         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3903                        BTRFS_BLOCK_GROUP_RAID1 |
3904                        BTRFS_BLOCK_GROUP_RAID10))
3905                 avail >>= 1;
3906
3907         to_add = space_info->total_bytes;
3908
        /*
         * If we aren't flushing all things, let us overcommit up to
         * 1/2 of the space.  If we can flush, don't let us overcommit
         * too much; only allow overcommitting up to 1/8 of the space.
         */
3914         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3915                 to_add >>= 3;
3916         else
3917                 to_add >>= 1;
3918
3919         /*
3920          * Limit the overcommit to the amount of free space we could possibly
3921          * allocate for chunks.
3922          */
3923         to_add = min(avail, to_add);
3924
3925         if (used + bytes < space_info->total_bytes + to_add)
3926                 return 1;
3927         return 0;
3928 }
3929
3930 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3931                                   unsigned long nr_pages)
3932 {
3933         struct super_block *sb = root->fs_info->sb;
3934         int started;
3935
        /* If we cannot start writeback, just sync all the delalloc files. */
3937         started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
3938                                                       WB_REASON_FS_FREE_SPACE);
3939         if (!started) {
                /*
                 * We needn't worry about the filesystem going from r/w to r/o
                 * even though we don't acquire the ->s_umount mutex, because
                 * the filesystem should guarantee that the delalloc inode
                 * list is empty once it is read-only (all dirty pages are
                 * written to disk).
                 */
3947                 btrfs_start_delalloc_inodes(root, 0);
3948                 if (!current->journal_info)
3949                         btrfs_wait_ordered_extents(root, 0);
3950         }
3951 }
3952
3953 /*
3954  * shrink metadata reservation for delalloc
3955  */
3956 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3957                             bool wait_ordered)
3958 {
3959         struct btrfs_block_rsv *block_rsv;
3960         struct btrfs_space_info *space_info;
3961         struct btrfs_trans_handle *trans;
3962         u64 delalloc_bytes;
3963         u64 max_reclaim;
3964         long time_left;
3965         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3966         int loops = 0;
3967         enum btrfs_reserve_flush_enum flush;
3968
3969         trans = (struct btrfs_trans_handle *)current->journal_info;
3970         block_rsv = &root->fs_info->delalloc_block_rsv;
3971         space_info = block_rsv->space_info;
3972
3973         smp_mb();
3974         delalloc_bytes = percpu_counter_sum_positive(
3975                                                 &root->fs_info->delalloc_bytes);
3976         if (delalloc_bytes == 0) {
3977                 if (trans)
3978                         return;
3979                 btrfs_wait_ordered_extents(root, 0);
3980                 return;
3981         }
3982
3983         while (delalloc_bytes && loops < 3) {
3984                 max_reclaim = min(delalloc_bytes, to_reclaim);
3985                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3986                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3987                 /*
3988                  * We need to wait for the async pages to actually start before
3989                  * we do anything.
3990                  */
3991                 wait_event(root->fs_info->async_submit_wait,
3992                            !atomic_read(&root->fs_info->async_delalloc_pages));
3993
3994                 if (!trans)
3995                         flush = BTRFS_RESERVE_FLUSH_ALL;
3996                 else
3997                         flush = BTRFS_RESERVE_NO_FLUSH;
3998                 spin_lock(&space_info->lock);
3999                 if (can_overcommit(root, space_info, orig, flush)) {
4000                         spin_unlock(&space_info->lock);
4001                         break;
4002                 }
4003                 spin_unlock(&space_info->lock);
4004
4005                 loops++;
4006                 if (wait_ordered && !trans) {
4007                         btrfs_wait_ordered_extents(root, 0);
4008                 } else {
4009                         time_left = schedule_timeout_killable(1);
4010                         if (time_left)
4011                                 break;
4012                 }
4013                 smp_mb();
4014                 delalloc_bytes = percpu_counter_sum_positive(
4015                                                 &root->fs_info->delalloc_bytes);
4016         }
4017 }
4018
/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @space_info - the space_info we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
4029 static int may_commit_transaction(struct btrfs_root *root,
4030                                   struct btrfs_space_info *space_info,
4031                                   u64 bytes, int force)
4032 {
4033         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4034         struct btrfs_trans_handle *trans;
4035
4036         trans = (struct btrfs_trans_handle *)current->journal_info;
4037         if (trans)
4038                 return -EAGAIN;
4039
4040         if (force)
4041                 goto commit;
4042
4043         /* See if there is enough pinned space to make this reservation */
4044         spin_lock(&space_info->lock);
4045         if (space_info->bytes_pinned >= bytes) {
4046                 spin_unlock(&space_info->lock);
4047                 goto commit;
4048         }
4049         spin_unlock(&space_info->lock);
4050
4051         /*
4052          * See if there is some space in the delayed insertion reservation for
4053          * this reservation.
4054          */
4055         if (space_info != delayed_rsv->space_info)
4056                 return -ENOSPC;
4057
4058         spin_lock(&space_info->lock);
4059         spin_lock(&delayed_rsv->lock);
4060         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
4061                 spin_unlock(&delayed_rsv->lock);
4062                 spin_unlock(&space_info->lock);
4063                 return -ENOSPC;
4064         }
4065         spin_unlock(&delayed_rsv->lock);
4066         spin_unlock(&space_info->lock);
4067
4068 commit:
4069         trans = btrfs_join_transaction(root);
4070         if (IS_ERR(trans))
4071                 return -ENOSPC;
4072
4073         return btrfs_commit_transaction(trans, root);
4074 }
4075
4076 enum flush_state {
4077         FLUSH_DELAYED_ITEMS_NR  =       1,
4078         FLUSH_DELAYED_ITEMS     =       2,
4079         FLUSH_DELALLOC          =       3,
4080         FLUSH_DELALLOC_WAIT     =       4,
4081         ALLOC_CHUNK             =       5,
4082         COMMIT_TRANS            =       6,
4083 };
4084
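/*
 * Run one flush state, in order of increasing cost: run some or all of
 * the delayed items, flush (and optionally wait on) delalloc, allocate a
 * new chunk, and finally commit the transaction.
 */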
4085 static int flush_space(struct btrfs_root *root,
4086                        struct btrfs_space_info *space_info, u64 num_bytes,
4087                        u64 orig_bytes, int state)
4088 {
4089         struct btrfs_trans_handle *trans;
4090         int nr;
4091         int ret = 0;
4092
4093         switch (state) {
4094         case FLUSH_DELAYED_ITEMS_NR:
4095         case FLUSH_DELAYED_ITEMS:
4096                 if (state == FLUSH_DELAYED_ITEMS_NR) {
4097                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4098
4099                         nr = (int)div64_u64(num_bytes, bytes);
4100                         if (!nr)
4101                                 nr = 1;
4102                         nr *= 2;
4103                 } else {
4104                         nr = -1;
4105                 }
4106                 trans = btrfs_join_transaction(root);
4107                 if (IS_ERR(trans)) {
4108                         ret = PTR_ERR(trans);
4109                         break;
4110                 }
4111                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4112                 btrfs_end_transaction(trans, root);
4113                 break;
4114         case FLUSH_DELALLOC:
4115         case FLUSH_DELALLOC_WAIT:
4116                 shrink_delalloc(root, num_bytes, orig_bytes,
4117                                 state == FLUSH_DELALLOC_WAIT);
4118                 break;
4119         case ALLOC_CHUNK:
4120                 trans = btrfs_join_transaction(root);
4121                 if (IS_ERR(trans)) {
4122                         ret = PTR_ERR(trans);
4123                         break;
4124                 }
4125                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4126                                      btrfs_get_alloc_profile(root, 0),
4127                                      CHUNK_ALLOC_NO_FORCE);
4128                 btrfs_end_transaction(trans, root);
4129                 if (ret == -ENOSPC)
4130                         ret = 0;
4131                 break;
4132         case COMMIT_TRANS:
4133                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4134                 break;
4135         default:
4136                 ret = -ENOSPC;
4137                 break;
4138         }
4139
4140         return ret;
4141 }
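
/*
 * Illustrative sketch, not kernel code: the FLUSH_DELAYED_ITEMS_NR arm
 * above sizes its work by the shortfall.  Assuming only that
 * bytes_per_item is the metadata cost of a single item
 * (btrfs_calc_trans_metadata_size(root, 1)), the item count works out as
 * below; the helper is hypothetical.
 */
static inline int delayed_items_to_run(u64 num_bytes, u64 bytes_per_item)
{
        int nr = (int)div64_u64(num_bytes, bytes_per_item);

        if (!nr)
                nr = 1;
        return nr * 2;  /* overshoot by 2x so one pass usually suffices */
}
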
4142 /**
4143  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4144  * @root - the root we're allocating for
4145  * @block_rsv - the block_rsv we're allocating for
4146  * @orig_bytes - the number of bytes we want
4147  * @flush - whether or not we can flush to make our reservation
4148  *
4149  * This will reserve orig_bytes number of bytes from the space info associated
4150  * with the block_rsv.  If there is not enough space it will make an attempt to
4151  * flush out space to make room.  It will do this by flushing delalloc if
4152  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4153  * then no attempt to regain reservations will be made and this will fail if
4154  * there is not enough space already.
4155  */
4156 static int reserve_metadata_bytes(struct btrfs_root *root,
4157                                   struct btrfs_block_rsv *block_rsv,
4158                                   u64 orig_bytes,
4159                                   enum btrfs_reserve_flush_enum flush)
4160 {
4161         struct btrfs_space_info *space_info = block_rsv->space_info;
4162         u64 used;
4163         u64 num_bytes = orig_bytes;
4164         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4165         int ret = 0;
4166         bool flushing = false;
4167
4168 again:
4169         ret = 0;
4170         spin_lock(&space_info->lock);
4171         /*
4172          * We only want to wait if somebody other than us is flushing and we
4173          * are actually allowed to flush all things.
4174          */
4175         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4176                space_info->flush) {
4177                 spin_unlock(&space_info->lock);
4178                 /*
4179                  * If we have a trans handle we can't wait because the flusher
4180                  * may have to commit the transaction, which would mean we would
4181                  * deadlock since we are waiting for the flusher to finish, but
4182                  * hold the current transaction open.
4183                  */
4184                 if (current->journal_info)
4185                         return -EAGAIN;
4186                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4187                 /* Must have been killed, return */
4188                 if (ret)
4189                         return -EINTR;
4190
4191                 spin_lock(&space_info->lock);
4192         }
4193
4194         ret = -ENOSPC;
4195         used = space_info->bytes_used + space_info->bytes_reserved +
4196                 space_info->bytes_pinned + space_info->bytes_readonly +
4197                 space_info->bytes_may_use;
4198
4199         /*
4200          * The idea here is that if we haven't already over-reserved the space
4201          * info, we can go ahead and save our reservation first and then start
4202          * flushing if we need to.  Otherwise, if we've already overcommitted,
4203          * let's start flushing stuff first and then come back and try to make
4204          * our reservation.
4205          */
4206         if (used <= space_info->total_bytes) {
4207                 if (used + orig_bytes <= space_info->total_bytes) {
4208                         space_info->bytes_may_use += orig_bytes;
4209                         trace_btrfs_space_reservation(root->fs_info,
4210                                 "space_info", space_info->flags, orig_bytes, 1);
4211                         ret = 0;
4212                 } else {
4213                         /*
4214                          * OK, set num_bytes to orig_bytes since we aren't
4215                          * overcommitted; this way we only try to reclaim what
4216                          * we need.
4217                          */
4218                         num_bytes = orig_bytes;
4219                 }
4220         } else {
4221                 /*
4222                  * OK, we're overcommitted: set num_bytes to the overcommitted
4223                  * amount plus the amount of bytes that we need for this
4224                  * reservation.
4225                  */
4226                 num_bytes = used - space_info->total_bytes +
4227                         (orig_bytes * 2);
4228         }
4229
4230         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4231                 space_info->bytes_may_use += orig_bytes;
4232                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4233                                               space_info->flags, orig_bytes,
4234                                               1);
4235                 ret = 0;
4236         }
4237
4238         /*
4239          * Couldn't make our reservation, save our place so while we're trying
4240          * to reclaim space we can actually use it instead of somebody else
4241          * stealing it from us.
4242          *
4243          * We make the other tasks wait for the flush only when we can flush
4244          * all things.
4245          */
4246         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4247                 flushing = true;
4248                 space_info->flush = 1;
4249         }
4250
4251         spin_unlock(&space_info->lock);
4252
4253         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4254                 goto out;
4255
4256         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4257                           flush_state);
4258         flush_state++;
4259
4260         /*
4261          * If we are BTRFS_RESERVE_FLUSH_LIMIT, we cannot flush delalloc or a
4262          * deadlock could happen, so skip the delalloc flush states.
4263          */
4264         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4265             (flush_state == FLUSH_DELALLOC ||
4266              flush_state == FLUSH_DELALLOC_WAIT))
4267                 flush_state = ALLOC_CHUNK;
4268
4269         if (!ret)
4270                 goto again;
4271         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4272                  flush_state < COMMIT_TRANS)
4273                 goto again;
4274         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4275                  flush_state <= COMMIT_TRANS)
4276                 goto again;
4277
4278 out:
4279         if (ret == -ENOSPC &&
4280             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4281                 struct btrfs_block_rsv *global_rsv =
4282                         &root->fs_info->global_block_rsv;
4283
4284                 if (block_rsv != global_rsv &&
4285                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4286                         ret = 0;
4287         }
4288         if (flushing) {
4289                 spin_lock(&space_info->lock);
4290                 space_info->flush = 0;
4291                 wake_up_all(&space_info->wait);
4292                 spin_unlock(&space_info->lock);
4293         }
4294         return ret;
4295 }
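
/*
 * Worked example, with made-up numbers, of the reclaim target computed in
 * the overcommitted branch above: not kernel code, just the arithmetic.
 * Doubling the request appears intended to leave headroom for concurrent
 * reservers while we flush.
 */
static inline u64 reclaim_target(u64 used, u64 total_bytes, u64 orig_bytes)
{
        /* e.g. used = 110, total = 100, orig = 5  =>  10 + 2 * 5 = 20 */
        return used - total_bytes + 2 * orig_bytes;
}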
4296
4297 static struct btrfs_block_rsv *get_block_rsv(
4298                                         const struct btrfs_trans_handle *trans,
4299                                         const struct btrfs_root *root)
4300 {
4301         struct btrfs_block_rsv *block_rsv = NULL;
4302
4303         if (root->ref_cows)
4304                 block_rsv = trans->block_rsv;
4305
4306         if (root == root->fs_info->csum_root && trans->adding_csums)
4307                 block_rsv = trans->block_rsv;
4308
4309         if (!block_rsv)
4310                 block_rsv = root->block_rsv;
4311
4312         if (!block_rsv)
4313                 block_rsv = &root->fs_info->empty_block_rsv;
4314
4315         return block_rsv;
4316 }
4317
4318 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4319                                u64 num_bytes)
4320 {
4321         int ret = -ENOSPC;
4322         spin_lock(&block_rsv->lock);
4323         if (block_rsv->reserved >= num_bytes) {
4324                 block_rsv->reserved -= num_bytes;
4325                 if (block_rsv->reserved < block_rsv->size)
4326                         block_rsv->full = 0;
4327                 ret = 0;
4328         }
4329         spin_unlock(&block_rsv->lock);
4330         return ret;
4331 }
4332
4333 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4334                                 u64 num_bytes, int update_size)
4335 {
4336         spin_lock(&block_rsv->lock);
4337         block_rsv->reserved += num_bytes;
4338         if (update_size)
4339                 block_rsv->size += num_bytes;
4340         else if (block_rsv->reserved >= block_rsv->size)
4341                 block_rsv->full = 1;
4342         spin_unlock(&block_rsv->lock);
4343 }
4344
4345 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4346                                     struct btrfs_block_rsv *block_rsv,
4347                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4348 {
4349         struct btrfs_space_info *space_info = block_rsv->space_info;
4350
4351         spin_lock(&block_rsv->lock);
4352         if (num_bytes == (u64)-1)
4353                 num_bytes = block_rsv->size;
4354         block_rsv->size -= num_bytes;
4355         if (block_rsv->reserved >= block_rsv->size) {
4356                 num_bytes = block_rsv->reserved - block_rsv->size;
4357                 block_rsv->reserved = block_rsv->size;
4358                 block_rsv->full = 1;
4359         } else {
4360                 num_bytes = 0;
4361         }
4362         spin_unlock(&block_rsv->lock);
4363
4364         if (num_bytes > 0) {
4365                 if (dest) {
4366                         spin_lock(&dest->lock);
4367                         if (!dest->full) {
4368                                 u64 bytes_to_add;
4369
4370                                 bytes_to_add = dest->size - dest->reserved;
4371                                 bytes_to_add = min(num_bytes, bytes_to_add);
4372                                 dest->reserved += bytes_to_add;
4373                                 if (dest->reserved >= dest->size)
4374                                         dest->full = 1;
4375                                 num_bytes -= bytes_to_add;
4376                         }
4377                         spin_unlock(&dest->lock);
4378                 }
4379                 if (num_bytes) {
4380                         spin_lock(&space_info->lock);
4381                         space_info->bytes_may_use -= num_bytes;
4382                         trace_btrfs_space_reservation(fs_info, "space_info",
4383                                         space_info->flags, num_bytes, 0);
4384                         space_info->reservation_progress++;
4385                         spin_unlock(&space_info->lock);
4386                 }
4387         }
4388 }
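
/*
 * Illustrative sketch, not kernel code, of the spill order implemented
 * above: excess first tops up the destination rsv, and only the remainder
 * is handed back to the space_info's bytes_may_use.  Assumes the
 * destination is not already full; the helper is hypothetical.
 */
static inline u64 spill_excess(u64 excess, u64 dest_size, u64 *dest_reserved)
{
        u64 room = dest_size - *dest_reserved;
        u64 to_dest = excess < room ? excess : room;

        *dest_reserved += to_dest;
        return excess - to_dest;  /* remainder goes back to the space_info */
}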
4389
4390 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4391                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4392 {
4393         int ret;
4394
4395         ret = block_rsv_use_bytes(src, num_bytes);
4396         if (ret)
4397                 return ret;
4398
4399         block_rsv_add_bytes(dst, num_bytes, 1);
4400         return 0;
4401 }
4402
4403 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4404 {
4405         memset(rsv, 0, sizeof(*rsv));
4406         spin_lock_init(&rsv->lock);
4407         rsv->type = type;
4408 }
4409
4410 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4411                                               unsigned short type)
4412 {
4413         struct btrfs_block_rsv *block_rsv;
4414         struct btrfs_fs_info *fs_info = root->fs_info;
4415
4416         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4417         if (!block_rsv)
4418                 return NULL;
4419
4420         btrfs_init_block_rsv(block_rsv, type);
4421         block_rsv->space_info = __find_space_info(fs_info,
4422                                                   BTRFS_BLOCK_GROUP_METADATA);
4423         return block_rsv;
4424 }
4425
4426 void btrfs_free_block_rsv(struct btrfs_root *root,
4427                           struct btrfs_block_rsv *rsv)
4428 {
4429         if (!rsv)
4430                 return;
4431         btrfs_block_rsv_release(root, rsv, (u64)-1);
4432         kfree(rsv);
4433 }
4434
4435 int btrfs_block_rsv_add(struct btrfs_root *root,
4436                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4437                         enum btrfs_reserve_flush_enum flush)
4438 {
4439         int ret;
4440
4441         if (num_bytes == 0)
4442                 return 0;
4443
4444         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4445         if (!ret) {
4446                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4447                 return 0;
4448         }
4449
4450         return ret;
4451 }
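
/*
 * Typical lifecycle sketch (illustrative only, error paths trimmed):
 * allocate a temporary rsv, fill it, consume it, then free it, which
 * releases whatever is left.  BTRFS_BLOCK_RSV_TEMP is assumed here to be
 * the type used for such short-lived reservations.
 */
static int example_reserve_two_items(struct btrfs_root *root)
{
        struct btrfs_block_rsv *rsv;
        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 2);
        int ret;

        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                return -ENOMEM;

        ret = btrfs_block_rsv_add(root, rsv, num_bytes,
                                  BTRFS_RESERVE_FLUSH_ALL);
        if (ret) {
                btrfs_free_block_rsv(root, rsv);
                return ret;
        }

        /* ... point trans->block_rsv at rsv and do the work ... */

        btrfs_free_block_rsv(root, rsv);  /* returns any unused bytes */
        return 0;
}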
4452
4453 int btrfs_block_rsv_check(struct btrfs_root *root,
4454                           struct btrfs_block_rsv *block_rsv, int min_factor)
4455 {
4456         u64 num_bytes = 0;
4457         int ret = -ENOSPC;
4458
4459         if (!block_rsv)
4460                 return 0;
4461
4462         spin_lock(&block_rsv->lock);
4463         num_bytes = div_factor(block_rsv->size, min_factor);
4464         if (block_rsv->reserved >= num_bytes)
4465                 ret = 0;
4466         spin_unlock(&block_rsv->lock);
4467
4468         return ret;
4469 }
4470
4471 int btrfs_block_rsv_refill(struct btrfs_root *root,
4472                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4473                            enum btrfs_reserve_flush_enum flush)
4474 {
4475         u64 num_bytes = 0;
4476         int ret = -ENOSPC;
4477
4478         if (!block_rsv)
4479                 return 0;
4480
4481         spin_lock(&block_rsv->lock);
4482         num_bytes = min_reserved;
4483         if (block_rsv->reserved >= num_bytes)
4484                 ret = 0;
4485         else
4486                 num_bytes -= block_rsv->reserved;
4487         spin_unlock(&block_rsv->lock);
4488
4489         if (!ret)
4490                 return 0;
4491
4492         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4493         if (!ret) {
4494                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4495                 return 0;
4496         }
4497
4498         return ret;
4499 }
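
/*
 * Illustrative contrast, not kernel code: btrfs_block_rsv_add() grows both
 * size and reserved (update_size = 1), while btrfs_block_rsv_refill() only
 * tops reserved back up to an existing floor (update_size = 0).  The
 * shortfall it reserves is just:
 */
static inline u64 refill_shortfall(u64 reserved, u64 min_reserved)
{
        /* e.g. reserved = 3, min_reserved = 8  =>  reserve 5 more */
        return reserved >= min_reserved ? 0 : min_reserved - reserved;
}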
4500
4501 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4502                             struct btrfs_block_rsv *dst_rsv,
4503                             u64 num_bytes)
4504 {
4505         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4506 }
4507
4508 void btrfs_block_rsv_release(struct btrfs_root *root,
4509                              struct btrfs_block_rsv *block_rsv,
4510                              u64 num_bytes)
4511 {
4512         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4513         if (global_rsv->full || global_rsv == block_rsv ||
4514             block_rsv->space_info != global_rsv->space_info)
4515                 global_rsv = NULL;
4516         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4517                                 num_bytes);
4518 }
4519
4520 /*
4521  * Helper to calculate the size of the global block reservation.
4522  * The desired value is the sum of the space used by the extent tree,
4523  * the checksum tree and the root tree.
4524  */
4525 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4526 {
4527         struct btrfs_space_info *sinfo;
4528         u64 num_bytes;
4529         u64 meta_used;
4530         u64 data_used;
4531         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4532
4533         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4534         spin_lock(&sinfo->lock);
4535         data_used = sinfo->bytes_used;
4536         spin_unlock(&sinfo->lock);
4537
4538         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4539         spin_lock(&sinfo->lock);
4540         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4541                 data_used = 0;
4542         meta_used = sinfo->bytes_used;
4543         spin_unlock(&sinfo->lock);
4544
4545         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4546                     csum_size * 2;
4547         num_bytes += div64_u64(data_used + meta_used, 50);
4548
4549         if (num_bytes * 3 > meta_used)
4550                 num_bytes = div64_u64(meta_used, 3);
4551
4552         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4553 }
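
/*
 * Worked example for the formula above, with made-up numbers: 4K blocks
 * (s_blocksize_bits = 12), csum_size = 4, data_used = 1 GiB,
 * meta_used = 256 MiB:
 *
 *   csum part: (2^30 >> 12) * 4 * 2        =  2 MiB
 *   2% part:   (1 GiB + 256 MiB) / 50     ~= 25.6 MiB
 *   total                                 ~= 27.6 MiB
 *
 * 3 * 27.6 MiB < 256 MiB, so the meta_used / 3 cap does not kick in, and
 * ALIGN to leafsize << 10 (4 MiB for a 4K leaf) rounds the result up
 * to 28 MiB.
 */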
4554
4555 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4556 {
4557         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4558         struct btrfs_space_info *sinfo = block_rsv->space_info;
4559         u64 num_bytes;
4560
4561         num_bytes = calc_global_metadata_size(fs_info);
4562
4563         spin_lock(&sinfo->lock);
4564         spin_lock(&block_rsv->lock);
4565
4566         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4567
4568         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4569                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4570                     sinfo->bytes_may_use;
4571
4572         if (sinfo->total_bytes > num_bytes) {
4573                 num_bytes = sinfo->total_bytes - num_bytes;
4574                 block_rsv->reserved += num_bytes;
4575                 sinfo->bytes_may_use += num_bytes;
4576                 trace_btrfs_space_reservation(fs_info, "space_info",
4577                                       sinfo->flags, num_bytes, 1);
4578         }
4579
4580         if (block_rsv->reserved >= block_rsv->size) {
4581                 num_bytes = block_rsv->reserved - block_rsv->size;
4582                 sinfo->bytes_may_use -= num_bytes;
4583                 trace_btrfs_space_reservation(fs_info, "space_info",
4584                                       sinfo->flags, num_bytes, 0);
4585                 sinfo->reservation_progress++;
4586                 block_rsv->reserved = block_rsv->size;
4587                 block_rsv->full = 1;
4588         }
4589
4590         spin_unlock(&block_rsv->lock);
4591         spin_unlock(&sinfo->lock);
4592 }
4593
4594 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4595 {
4596         struct btrfs_space_info *space_info;
4597
4598         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4599         fs_info->chunk_block_rsv.space_info = space_info;
4600
4601         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4602         fs_info->global_block_rsv.space_info = space_info;
4603         fs_info->delalloc_block_rsv.space_info = space_info;
4604         fs_info->trans_block_rsv.space_info = space_info;
4605         fs_info->empty_block_rsv.space_info = space_info;
4606         fs_info->delayed_block_rsv.space_info = space_info;
4607
4608         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4609         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4610         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4611         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4612         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4613
4614         update_global_block_rsv(fs_info);
4615 }
4616
4617 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4618 {
4619         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4620                                 (u64)-1);
4621         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4622         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4623         WARN_ON(fs_info->trans_block_rsv.size > 0);
4624         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4625         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4626         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4627         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4628         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4629 }
4630
4631 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4632                                   struct btrfs_root *root)
4633 {
4634         if (!trans->block_rsv)
4635                 return;
4636
4637         if (!trans->bytes_reserved)
4638                 return;
4639
4640         trace_btrfs_space_reservation(root->fs_info, "transaction",
4641                                       trans->transid, trans->bytes_reserved, 0);
4642         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4643         trans->bytes_reserved = 0;
4644 }
4645
4646 /* Can only return 0 or -ENOSPC */
4647 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4648                                   struct inode *inode)
4649 {
4650         struct btrfs_root *root = BTRFS_I(inode)->root;
4651         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4652         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4653
4654         /*
4655          * We need to hold space in order to delete our orphan item once we've
4656          * added it, so this takes the reservation so we can release it later
4657          * when we are truly done with the orphan item.
4658          */
4659         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4660         trace_btrfs_space_reservation(root->fs_info, "orphan",
4661                                       btrfs_ino(inode), num_bytes, 1);
4662         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4663 }
4664
4665 void btrfs_orphan_release_metadata(struct inode *inode)
4666 {
4667         struct btrfs_root *root = BTRFS_I(inode)->root;
4668         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4669         trace_btrfs_space_reservation(root->fs_info, "orphan",
4670                                       btrfs_ino(inode), num_bytes, 0);
4671         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4672 }
4673
4674 /*
4675  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4676  * root: the root of the parent directory
4677  * rsv: block reservation
4678  * items: the number of items that we need to reserve space for
4679  * qgroup_reserved: used to return the reserved size in qgroup
4680  *
4681  * This function is used to reserve the space for snapshot/subvolume
4682  * creation and deletion.  Those operations differ from common
4683  * file/directory operations: they change two fs/file trees
4684  * and the root tree, and the number of items the qgroup reserves
4685  * differs from the free space reservation.  So we can not use
4686  * the space reservation mechanism in start_transaction().
4687  */
4688 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4689                                      struct btrfs_block_rsv *rsv,
4690                                      int items,
4691                                      u64 *qgroup_reserved)
4692 {
4693         u64 num_bytes;
4694         int ret;
4695
4696         if (root->fs_info->quota_enabled) {
4697                 /* One for parent inode, two for dir entries */
4698                 num_bytes = 3 * root->leafsize;
4699                 ret = btrfs_qgroup_reserve(root, num_bytes);
4700                 if (ret)
4701                         return ret;
4702         } else {
4703                 num_bytes = 0;
4704         }
4705
4706         *qgroup_reserved = num_bytes;
4707
4708         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4709         rsv->space_info = __find_space_info(root->fs_info,
4710                                             BTRFS_BLOCK_GROUP_METADATA);
4711         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4712                                   BTRFS_RESERVE_FLUSH_ALL);
4713         if (ret) {
4714                 if (*qgroup_reserved)
4715                         btrfs_qgroup_free(root, *qgroup_reserved);
4716         }
4717
4718         return ret;
4719 }
4720
4721 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4722                                       struct btrfs_block_rsv *rsv,
4723                                       u64 qgroup_reserved)
4724 {
4725         btrfs_block_rsv_release(root, rsv, (u64)-1);
4726         if (qgroup_reserved)
4727                 btrfs_qgroup_free(root, qgroup_reserved);
4728 }
4729
4730 /**
4731  * drop_outstanding_extent - drop an outstanding extent
4732  * @inode: the inode we're dropping the extent for
4733  *
4734  * This is called when we are freeing up an outstanding extent, either
4735  * after an error or after an extent is written.  This will return the number of
4736  * reserved extents that need to be freed.  This must be called with
4737  * BTRFS_I(inode)->lock held.
4738  */
4739 static unsigned drop_outstanding_extent(struct inode *inode)
4740 {
4741         unsigned drop_inode_space = 0;
4742         unsigned dropped_extents = 0;
4743
4744         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4745         BTRFS_I(inode)->outstanding_extents--;
4746
4747         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4748             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4749                                &BTRFS_I(inode)->runtime_flags))
4750                 drop_inode_space = 1;
4751
4752         /*
4753          * If we have as many or more outstanding extents than we have
4754          * reserved, then we need to leave the reserved extents count alone.
4755          */
4756         if (BTRFS_I(inode)->outstanding_extents >=
4757             BTRFS_I(inode)->reserved_extents)
4758                 return drop_inode_space;
4759
4760         dropped_extents = BTRFS_I(inode)->reserved_extents -
4761                 BTRFS_I(inode)->outstanding_extents;
4762         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4763         return dropped_extents + drop_inode_space;
4764 }
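
/*
 * Illustrative sketch, not kernel code: if outstanding_extents just
 * dropped to 2 while reserved_extents is 5, three reservations can be
 * returned (plus one more if the inode-update reservation was also
 * released).  The helper is hypothetical.
 */
static inline unsigned reserved_extents_to_drop(unsigned outstanding,
                                                unsigned reserved)
{
        /* e.g. outstanding = 2, reserved = 5  =>  drop 3 */
        return reserved > outstanding ? reserved - outstanding : 0;
}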
4765
4766 /**
4767  * calc_csum_metadata_size - return the amount of metadata space that must be
4768  *      reserved/freed for the given bytes.
4769  * @inode: the inode we're manipulating
4770  * @num_bytes: the number of bytes in question
4771  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4772  *
4773  * This adjusts the number of csum_bytes in the inode and then returns the
4774  * correct amount of metadata that must either be reserved or freed.  We
4775  * calculate how many checksums we can fit into one leaf and then divide the
4776  * number of bytes that will need to be checksummed by this value to figure out
4777  * how many checksums will be required.  If we are adding bytes then the number
4778  * may go up and we will return the number of additional bytes that must be
4779  * reserved.  If it is going down we will return the number of bytes that must
4780  * be freed.
4781  *
4782  * This must be called with BTRFS_I(inode)->lock held.
4783  */
4784 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4785                                    int reserve)
4786 {
4787         struct btrfs_root *root = BTRFS_I(inode)->root;
4788         u64 csum_size;
4789         int num_csums_per_leaf;
4790         int num_csums;
4791         int old_csums;
4792
4793         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4794             BTRFS_I(inode)->csum_bytes == 0)
4795                 return 0;
4796
4797         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4798         if (reserve)
4799                 BTRFS_I(inode)->csum_bytes += num_bytes;
4800         else
4801                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4802         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4803         num_csums_per_leaf = (int)div64_u64(csum_size,
4804                                             sizeof(struct btrfs_csum_item) +
4805                                             sizeof(struct btrfs_disk_key));
4806         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4807         num_csums = num_csums + num_csums_per_leaf - 1;
4808         num_csums = num_csums / num_csums_per_leaf;
4809
4810         old_csums = old_csums + num_csums_per_leaf - 1;
4811         old_csums = old_csums / num_csums_per_leaf;
4812
4813         /* No change, no need to reserve more */
4814         if (old_csums == num_csums)
4815                 return 0;
4816
4817         if (reserve)
4818                 return btrfs_calc_trans_metadata_size(root,
4819                                                       num_csums - old_csums);
4820
4821         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4822 }
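
/*
 * Illustrative restatement, not kernel code, of the ceiling arithmetic
 * above: csum_bytes is converted to a count of checksummed blocks and then
 * to a count of leaves.  E.g. 1 MiB at 4K sectors is 256 blocks; with
 * roughly 200 (csum item + key) pairs per 4K leaf that is 2 leaves.  The
 * helper and its parameters are hypothetical.
 */
static inline int csum_leaves_needed(u64 csum_bytes, u64 sectorsize,
                                     int csums_per_leaf)
{
        int blocks = (int)div64_u64(csum_bytes, sectorsize);

        return (blocks + csums_per_leaf - 1) / csums_per_leaf;
}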
4823
4824 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4825 {
4826         struct btrfs_root *root = BTRFS_I(inode)->root;
4827         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4828         u64 to_reserve = 0;
4829         u64 csum_bytes;
4830         unsigned nr_extents = 0;
4831         int extra_reserve = 0;
4832         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4833         int ret = 0;
4834         bool delalloc_lock = true;
4835         u64 to_free = 0;
4836         unsigned dropped;
4837
4838         /* If we are a free space inode we need to not flush since we will be in
4839          * the middle of a transaction commit.  We also don't need the delalloc
4840          * mutex since we won't race with anybody.  We need this mostly to make
4841          * lockdep shut its filthy mouth.
4842          */
4843         if (btrfs_is_free_space_inode(inode)) {
4844                 flush = BTRFS_RESERVE_NO_FLUSH;
4845                 delalloc_lock = false;
4846         }
4847
4848         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4849             btrfs_transaction_in_commit(root->fs_info))
4850                 schedule_timeout(1);
4851
4852         if (delalloc_lock)
4853                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4854
4855         num_bytes = ALIGN(num_bytes, root->sectorsize);
4856
4857         spin_lock(&BTRFS_I(inode)->lock);
4858         BTRFS_I(inode)->outstanding_extents++;
4859
4860         if (BTRFS_I(inode)->outstanding_extents >
4861             BTRFS_I(inode)->reserved_extents)
4862                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4863                         BTRFS_I(inode)->reserved_extents;
4864
4865         /*
4866          * Add an item to reserve for updating the inode when we complete the
4867          * delalloc io.
4868          */
4869         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4870                       &BTRFS_I(inode)->runtime_flags)) {
4871                 nr_extents++;
4872                 extra_reserve = 1;
4873         }
4874
4875         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4876         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4877         csum_bytes = BTRFS_I(inode)->csum_bytes;
4878         spin_unlock(&BTRFS_I(inode)->lock);
4879
4880         if (root->fs_info->quota_enabled) {
4881                 ret = btrfs_qgroup_reserve(root, num_bytes +
4882                                            nr_extents * root->leafsize);
4883                 if (ret)
4884                         goto out_fail;
4885         }
4886
4887         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4888         if (unlikely(ret)) {
4889                 if (root->fs_info->quota_enabled)
4890                         btrfs_qgroup_free(root, num_bytes +
4891                                                 nr_extents * root->leafsize);
4892                 goto out_fail;
4893         }
4894
4895         spin_lock(&BTRFS_I(inode)->lock);
4896         if (extra_reserve) {
4897                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4898                         &BTRFS_I(inode)->runtime_flags);
4899                 nr_extents--;
4900         }
4901         BTRFS_I(inode)->reserved_extents += nr_extents;
4902         spin_unlock(&BTRFS_I(inode)->lock);
4903
4904         if (delalloc_lock)
4905                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4906
4907         if (to_reserve)
4908                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4909                                               btrfs_ino(inode), to_reserve, 1);
4910         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4911
4912         return 0;
4913
4914 out_fail:
4915         spin_lock(&BTRFS_I(inode)->lock);
4916         dropped = drop_outstanding_extent(inode);
4917         /*
4918          * If the inode's csum_bytes is the same as the original
4919          * csum_bytes then we know we haven't raced with any free()ers,
4920          * so we can just reduce our inode's csum bytes and carry on.
4921          */
4922         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
4923                 calc_csum_metadata_size(inode, num_bytes, 0);
4924         } else {
4925                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
4926                 u64 bytes;
4927
4928                 /*
4929                  * This is tricky, but first we need to figure out how much we
4930                  * freed from any free-ers that occurred during this
4931                  * reservation, so we reset ->csum_bytes to the csum_bytes
4932                  * before we dropped our lock, and then call the free for the
4933                  * number of bytes that were freed while we were trying our
4934                  * reservation.
4935                  */
4936                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
4937                 BTRFS_I(inode)->csum_bytes = csum_bytes;
4938                 to_free = calc_csum_metadata_size(inode, bytes, 0);
4939
4940
4941                 /*
4942                  * Now we need to see how much we would have freed had we not
4943                  * been making this reservation and our ->csum_bytes were not
4944                  * artificially inflated.
4945                  */
4946                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
4947                 bytes = csum_bytes - orig_csum_bytes;
4948                 bytes = calc_csum_metadata_size(inode, bytes, 0);
4949
4950                 /*
4951                  * Now reset ->csum_bytes to what it should be.  If bytes is
4952                  * more than to_free then we would have freed more space had we
4953                  * not had an artificially high ->csum_bytes, so we need to free
4954                  * the remainder.  If bytes is the same or less then we don't
4955                  * need to do anything, the other free-ers did the correct
4956                  * thing.
4957                  */
4958                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
4959                 if (bytes > to_free)
4960                         to_free = bytes - to_free;
4961                 else
4962                         to_free = 0;
4963         }
4964         spin_unlock(&BTRFS_I(inode)->lock);
4965         if (dropped)
4966                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4967
4968         if (to_free) {
4969                 btrfs_block_rsv_release(root, block_rsv, to_free);
4970                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4971                                               btrfs_ino(inode), to_free, 0);
4972         }
4973         if (delalloc_lock)
4974                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4975         return ret;
4976 }
4977
4978 /**
4979  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4980  * @inode: the inode to release the reservation for
4981  * @num_bytes: the number of bytes we're releasing
4982  *
4983  * This will release the metadata reservation for an inode.  This can be called
4984  * once we complete IO for a given set of bytes to release their metadata
4985  * reservations.
4986  */
4987 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4988 {
4989         struct btrfs_root *root = BTRFS_I(inode)->root;
4990         u64 to_free = 0;
4991         unsigned dropped;
4992
4993         num_bytes = ALIGN(num_bytes, root->sectorsize);
4994         spin_lock(&BTRFS_I(inode)->lock);
4995         dropped = drop_outstanding_extent(inode);
4996
4997         if (num_bytes)
4998                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4999         spin_unlock(&BTRFS_I(inode)->lock);
5000         if (dropped > 0)
5001                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5002
5003         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5004                                       btrfs_ino(inode), to_free, 0);
5005         if (root->fs_info->quota_enabled) {
5006                 btrfs_qgroup_free(root, num_bytes +
5007                                         dropped * root->leafsize);
5008         }
5009
5010         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5011                                 to_free);
5012 }
5013
5014 /**
5015  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5016  * @inode: inode we're writing to
5017  * @num_bytes: the number of bytes we want to allocate
5018  *
5019  * This will do the following things
5020  *
5021  * o reserve space in the data space info for num_bytes
5022  * o reserve space in the metadata space info based on number of outstanding
5023  *   extents and how much csums will be needed
5024  * o add to the inode's ->delalloc_bytes
5025  * o add it to the fs_info's delalloc inodes list.
5026  *
5027  * This will return 0 for success and -ENOSPC if there is no space left.
5028  */
5029 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5030 {
5031         int ret;
5032
5033         ret = btrfs_check_data_free_space(inode, num_bytes);
5034         if (ret)
5035                 return ret;
5036
5037         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5038         if (ret) {
5039                 btrfs_free_reserved_data_space(inode, num_bytes);
5040                 return ret;
5041         }
5042
5043         return 0;
5044 }
5045
5046 /**
5047  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5048  * @inode: inode we're releasing space for
5049  * @num_bytes: the number of bytes we want to free up
5050  *
5051  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5052  * called in the case that we don't need the metadata AND data reservations
5053  * anymore, for example if there is an error or we insert an inline extent.
5054  *
5055  * This function will release the metadata space that was not used and will
5056  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5057  * list if there are no delalloc bytes left.
5058  */
5059 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5060 {
5061         btrfs_delalloc_release_metadata(inode, num_bytes);
5062         btrfs_free_reserved_data_space(inode, num_bytes);
5063 }
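
/*
 * Usage sketch, illustrative only: this is roughly how a write path pairs
 * the two calls above.  On success the metadata half is released later by
 * the IO completion path, not here.
 */
static int example_reserve_for_write(struct inode *inode, u64 len)
{
        int ret = btrfs_delalloc_reserve_space(inode, len);

        if (ret)
                return ret;             /* typically -ENOSPC */

        /*
         * ... dirty the pages here.  If that fails, undo both halves
         * with btrfs_delalloc_release_space(inode, len); on success the
         * metadata half is released later by IO completion.
         */
        return 0;
}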
5064
5065 static int update_block_group(struct btrfs_root *root,
5066                               u64 bytenr, u64 num_bytes, int alloc)
5067 {
5068         struct btrfs_block_group_cache *cache = NULL;
5069         struct btrfs_fs_info *info = root->fs_info;
5070         u64 total = num_bytes;
5071         u64 old_val;
5072         u64 byte_in_group;
5073         int factor;
5074
5075         /* block accounting for super block */
5076         spin_lock(&info->delalloc_lock);
5077         old_val = btrfs_super_bytes_used(info->super_copy);
5078         if (alloc)
5079                 old_val += num_bytes;
5080         else
5081                 old_val -= num_bytes;
5082         btrfs_set_super_bytes_used(info->super_copy, old_val);
5083         spin_unlock(&info->delalloc_lock);
5084
5085         while (total) {
5086                 cache = btrfs_lookup_block_group(info, bytenr);
5087                 if (!cache)
5088                         return -ENOENT;
5089                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5090                                     BTRFS_BLOCK_GROUP_RAID1 |
5091                                     BTRFS_BLOCK_GROUP_RAID10))
5092                         factor = 2;
5093                 else
5094                         factor = 1;
5095                 /*
5096                  * If this block group has free space cache written out, we
5097                  * need to make sure to load it if we are removing space.  This
5098                  * is because we need the unpinning stage to actually add the
5099                  * space back to the block group, otherwise we will leak space.
5100                  */
5101                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5102                         cache_block_group(cache, 1);
5103
5104                 byte_in_group = bytenr - cache->key.objectid;
5105                 WARN_ON(byte_in_group > cache->key.offset);
5106
5107                 spin_lock(&cache->space_info->lock);
5108                 spin_lock(&cache->lock);
5109
5110                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5111                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5112                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5113
5114                 cache->dirty = 1;
5115                 old_val = btrfs_block_group_used(&cache->item);
5116                 num_bytes = min(total, cache->key.offset - byte_in_group);
5117                 if (alloc) {
5118                         old_val += num_bytes;
5119                         btrfs_set_block_group_used(&cache->item, old_val);
5120                         cache->reserved -= num_bytes;
5121                         cache->space_info->bytes_reserved -= num_bytes;
5122                         cache->space_info->bytes_used += num_bytes;
5123                         cache->space_info->disk_used += num_bytes * factor;
5124                         spin_unlock(&cache->lock);
5125                         spin_unlock(&cache->space_info->lock);
5126                 } else {
5127                         old_val -= num_bytes;
5128                         btrfs_set_block_group_used(&cache->item, old_val);
5129                         cache->pinned += num_bytes;
5130                         cache->space_info->bytes_pinned += num_bytes;
5131                         cache->space_info->bytes_used -= num_bytes;
5132                         cache->space_info->disk_used -= num_bytes * factor;
5133                         spin_unlock(&cache->lock);
5134                         spin_unlock(&cache->space_info->lock);
5135
5136                         set_extent_dirty(info->pinned_extents,
5137                                          bytenr, bytenr + num_bytes - 1,
5138                                          GFP_NOFS | __GFP_NOFAIL);
5139                 }
5140                 btrfs_put_block_group(cache);
5141                 total -= num_bytes;
5142                 bytenr += num_bytes;
5143         }
5144         return 0;
5145 }
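
/*
 * Illustrative note, not kernel code: the "factor" above converts logical
 * bytes into raw disk bytes for the profiles that keep two copies.  The
 * RAID5/6 parity overhead is not expressible as a flat multiplier, so it
 * is not handled here.  The helper is hypothetical.
 */
static inline u64 disk_bytes_for(u64 num_bytes, u64 bg_flags)
{
        int factor = (bg_flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10)) ? 2 : 1;

        return num_bytes * factor;
}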
5146
5147 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5148 {
5149         struct btrfs_block_group_cache *cache;
5150         u64 bytenr;
5151
5152         spin_lock(&root->fs_info->block_group_cache_lock);
5153         bytenr = root->fs_info->first_logical_byte;
5154         spin_unlock(&root->fs_info->block_group_cache_lock);
5155
5156         if (bytenr < (u64)-1)
5157                 return bytenr;
5158
5159         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5160         if (!cache)
5161                 return 0;
5162
5163         bytenr = cache->key.objectid;
5164         btrfs_put_block_group(cache);
5165
5166         return bytenr;
5167 }
5168
5169 static int pin_down_extent(struct btrfs_root *root,
5170                            struct btrfs_block_group_cache *cache,
5171                            u64 bytenr, u64 num_bytes, int reserved)
5172 {
5173         spin_lock(&cache->space_info->lock);
5174         spin_lock(&cache->lock);
5175         cache->pinned += num_bytes;
5176         cache->space_info->bytes_pinned += num_bytes;
5177         if (reserved) {
5178                 cache->reserved -= num_bytes;
5179                 cache->space_info->bytes_reserved -= num_bytes;
5180         }
5181         spin_unlock(&cache->lock);
5182         spin_unlock(&cache->space_info->lock);
5183
5184         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5185                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5186         return 0;
5187 }
5188
5189 /*
5190  * this function must be called within a transaction
5191  */
5192 int btrfs_pin_extent(struct btrfs_root *root,
5193                      u64 bytenr, u64 num_bytes, int reserved)
5194 {
5195         struct btrfs_block_group_cache *cache;
5196
5197         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5198         BUG_ON(!cache); /* Logic error */
5199
5200         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5201
5202         btrfs_put_block_group(cache);
5203         return 0;
5204 }
5205
5206 /*
5207  * this function must be called within a transaction
5208  */
5209 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5210                                     u64 bytenr, u64 num_bytes)
5211 {
5212         struct btrfs_block_group_cache *cache;
5213
5214         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5215         BUG_ON(!cache); /* Logic error */
5216
5217         /*
5218          * pull in the free space cache (if any) so that our pin
5219          * removes the free space from the cache.  We have load_only set
5220          * to one because the slow code to read in the free extents does check
5221          * the pinned extents.
5222          */
5223         cache_block_group(cache, 1);
5224
5225         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5226
5227         /* remove us from the free space cache (if we're there at all) */
5228         btrfs_remove_free_space(cache, bytenr, num_bytes);
5229         btrfs_put_block_group(cache);
5230         return 0;
5231 }
5232
5233 /**
5234  * btrfs_update_reserved_bytes - update the block_group and space info counters
5235  * @cache:      The cache we are manipulating
5236  * @num_bytes:  The number of bytes in question
5237  * @reserve:    One of the reservation enums
5238  *
5239  * This is called by the allocator when it reserves space, or by somebody who is
5240  * freeing space that was never actually used on disk.  For example if you
5241  * reserve some space for a new leaf in transaction A and before transaction A
5242  * commits you free that leaf, you call this with reserve set to 0 in order to
5243  * clear the reservation.
5244  *
5245  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5246  * ENOSPC accounting.  For data we handle the reservation through clearing the
5247  * delalloc bits in the io_tree.  We have to do this since we could end up
5248  * allocating less disk space for the amount of data we have reserved in the
5249  * case of compression.
5250  *
5251  * If this is a reservation and the block group has become read only we cannot
5252  * make the reservation and return -EAGAIN, otherwise this function always
5253  * succeeds.
5254  */
5255 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5256                                        u64 num_bytes, int reserve)
5257 {
5258         struct btrfs_space_info *space_info = cache->space_info;
5259         int ret = 0;
5260
5261         spin_lock(&space_info->lock);
5262         spin_lock(&cache->lock);
5263         if (reserve != RESERVE_FREE) {
5264                 if (cache->ro) {
5265                         ret = -EAGAIN;
5266                 } else {
5267                         cache->reserved += num_bytes;
5268                         space_info->bytes_reserved += num_bytes;
5269                         if (reserve == RESERVE_ALLOC) {
5270                                 trace_btrfs_space_reservation(cache->fs_info,
5271                                                 "space_info", space_info->flags,
5272                                                 num_bytes, 0);
5273                                 space_info->bytes_may_use -= num_bytes;
5274                         }
5275                 }
5276         } else {
5277                 if (cache->ro)
5278                         space_info->bytes_readonly += num_bytes;
5279                 cache->reserved -= num_bytes;
5280                 space_info->bytes_reserved -= num_bytes;
5281                 space_info->reservation_progress++;
5282         }
5283         spin_unlock(&cache->lock);
5284         spin_unlock(&space_info->lock);
5285         return ret;
5286 }
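
/*
 * Summary of the counter moves above (illustrative only):
 *
 *   RESERVE_ALLOC             bytes_may_use  -> bytes_reserved
 *   RESERVE_ALLOC_NO_ACCOUNT         (none)  -> bytes_reserved
 *   RESERVE_FREE              bytes_reserved -> free space
 *                             (or bytes_readonly if the group went ro)
 */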
5287
5288 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5289                                 struct btrfs_root *root)
5290 {
5291         struct btrfs_fs_info *fs_info = root->fs_info;
5292         struct btrfs_caching_control *next;
5293         struct btrfs_caching_control *caching_ctl;
5294         struct btrfs_block_group_cache *cache;
5295
5296         down_write(&fs_info->extent_commit_sem);
5297
5298         list_for_each_entry_safe(caching_ctl, next,
5299                                  &fs_info->caching_block_groups, list) {
5300                 cache = caching_ctl->block_group;
5301                 if (block_group_cache_done(cache)) {
5302                         cache->last_byte_to_unpin = (u64)-1;
5303                         list_del_init(&caching_ctl->list);
5304                         put_caching_control(caching_ctl);
5305                 } else {
5306                         cache->last_byte_to_unpin = caching_ctl->progress;
5307                 }
5308         }
5309
5310         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5311                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5312         else
5313                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5314
5315         up_write(&fs_info->extent_commit_sem);
5316
5317         update_global_block_rsv(fs_info);
5318 }
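
/*
 * Illustrative note, not kernel code: pinned extents appear to be
 * double-buffered.  New pins land in whichever freed_extents[] tree
 * pinned_extents points at; the swap above flips that pointer at commit
 * time, and btrfs_finish_extent_commit() then drains the tree filled by
 * the transaction that just committed.
 */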
5319
5320 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5321 {
5322         struct btrfs_fs_info *fs_info = root->fs_info;
5323         struct btrfs_block_group_cache *cache = NULL;
5324         struct btrfs_space_info *space_info;
5325         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5326         u64 len;
5327         bool readonly;
5328
5329         while (start <= end) {
5330                 readonly = false;
5331                 if (!cache ||
5332                     start >= cache->key.objectid + cache->key.offset) {
5333                         if (cache)
5334                                 btrfs_put_block_group(cache);
5335                         cache = btrfs_lookup_block_group(fs_info, start);
5336                         BUG_ON(!cache); /* Logic error */
5337                 }
5338
5339                 len = cache->key.objectid + cache->key.offset - start;
5340                 len = min(len, end + 1 - start);
5341
5342                 if (start < cache->last_byte_to_unpin) {
5343                         len = min(len, cache->last_byte_to_unpin - start);
5344                         btrfs_add_free_space(cache, start, len);
5345                 }
5346
5347                 start += len;
5348                 space_info = cache->space_info;
5349
5350                 spin_lock(&space_info->lock);
5351                 spin_lock(&cache->lock);
5352                 cache->pinned -= len;
5353                 space_info->bytes_pinned -= len;
5354                 if (cache->ro) {
5355                         space_info->bytes_readonly += len;
5356                         readonly = true;
5357                 }
5358                 spin_unlock(&cache->lock);
5359                 if (!readonly && global_rsv->space_info == space_info) {
5360                         spin_lock(&global_rsv->lock);
5361                         if (!global_rsv->full) {
5362                                 len = min(len, global_rsv->size -
5363                                           global_rsv->reserved);
5364                                 global_rsv->reserved += len;
5365                                 space_info->bytes_may_use += len;
5366                                 if (global_rsv->reserved >= global_rsv->size)
5367                                         global_rsv->full = 1;
5368                         }
5369                         spin_unlock(&global_rsv->lock);
5370                 }
5371                 spin_unlock(&space_info->lock);
5372         }
5373
5374         if (cache)
5375                 btrfs_put_block_group(cache);
5376         return 0;
5377 }
5378
5379 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5380                                struct btrfs_root *root)
5381 {
5382         struct btrfs_fs_info *fs_info = root->fs_info;
5383         struct extent_io_tree *unpin;
5384         u64 start;
5385         u64 end;
5386         int ret;
5387
5388         if (trans->aborted)
5389                 return 0;
5390
5391         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5392                 unpin = &fs_info->freed_extents[1];
5393         else
5394                 unpin = &fs_info->freed_extents[0];
5395
5396         while (1) {
5397                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5398                                             EXTENT_DIRTY, NULL);
5399                 if (ret)
5400                         break;
5401
5402                 if (btrfs_test_opt(root, DISCARD))
5403                         ret = btrfs_discard_extent(root, start,
5404                                                    end + 1 - start, NULL);
5405
5406                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5407                 unpin_extent_range(root, start, end);
5408                 cond_resched();
5409         }
5410
5411         return 0;
5412 }
5413
5414 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5415                                 struct btrfs_root *root,
5416                                 u64 bytenr, u64 num_bytes, u64 parent,
5417                                 u64 root_objectid, u64 owner_objectid,
5418                                 u64 owner_offset, int refs_to_drop,
5419                                 struct btrfs_delayed_extent_op *extent_op)
5420 {
5421         struct btrfs_key key;
5422         struct btrfs_path *path;
5423         struct btrfs_fs_info *info = root->fs_info;
5424         struct btrfs_root *extent_root = info->extent_root;
5425         struct extent_buffer *leaf;
5426         struct btrfs_extent_item *ei;
5427         struct btrfs_extent_inline_ref *iref;
5428         int ret;
5429         int is_data;
5430         int extent_slot = 0;
5431         int found_extent = 0;
5432         int num_to_del = 1;
5433         u32 item_size;
5434         u64 refs;
5435         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5436                                                  SKINNY_METADATA);
5437
5438         path = btrfs_alloc_path();
5439         if (!path)
5440                 return -ENOMEM;
5441
5442         path->reada = 1;
5443         path->leave_spinning = 1;
5444
5445         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5446         BUG_ON(!is_data && refs_to_drop != 1);
5447
5448         if (is_data)
5449                 skinny_metadata = 0;
5450
5451         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5452                                     bytenr, num_bytes, parent,
5453                                     root_objectid, owner_objectid,
5454                                     owner_offset);
5455         if (ret == 0) {
5456                 extent_slot = path->slots[0];
5457                 while (extent_slot >= 0) {
5458                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5459                                               extent_slot);
5460                         if (key.objectid != bytenr)
5461                                 break;
5462                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5463                             key.offset == num_bytes) {
5464                                 found_extent = 1;
5465                                 break;
5466                         }
5467                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5468                             key.offset == owner_objectid) {
5469                                 found_extent = 1;
5470                                 break;
5471                         }
5472                         if (path->slots[0] - extent_slot > 5)
5473                                 break;
5474                         extent_slot--;
5475                 }
5476 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5477                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5478                 if (found_extent && item_size < sizeof(*ei))
5479                         found_extent = 0;
5480 #endif
5481                 if (!found_extent) {
5482                         BUG_ON(iref);
5483                         ret = remove_extent_backref(trans, extent_root, path,
5484                                                     NULL, refs_to_drop,
5485                                                     is_data);
5486                         if (ret) {
5487                                 btrfs_abort_transaction(trans, extent_root, ret);
5488                                 goto out;
5489                         }
5490                         btrfs_release_path(path);
5491                         path->leave_spinning = 1;
5492
5493                         key.objectid = bytenr;
5494                         key.type = BTRFS_EXTENT_ITEM_KEY;
5495                         key.offset = num_bytes;
5496
5497                         if (!is_data && skinny_metadata) {
5498                                 key.type = BTRFS_METADATA_ITEM_KEY;
5499                                 key.offset = owner_objectid;
5500                         }
5501
5502                         ret = btrfs_search_slot(trans, extent_root,
5503                                                 &key, path, -1, 1);
5504                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5505                                 /*
5506                                  * Couldn't find our skinny metadata item,
5507                                  * see if we have ye olde extent item.
5508                                  */
5509                                 path->slots[0]--;
5510                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5511                                                       path->slots[0]);
5512                                 if (key.objectid == bytenr &&
5513                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5514                                     key.offset == num_bytes)
5515                                         ret = 0;
5516                         }
5517
5518                         if (ret > 0 && skinny_metadata) {
5519                                 skinny_metadata = false;
5520                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5521                                 key.offset = num_bytes;
5522                                 btrfs_release_path(path);
5523                                 ret = btrfs_search_slot(trans, extent_root,
5524                                                         &key, path, -1, 1);
5525                         }
5526
5527                         if (ret) {
5528                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5529                                         ret, (unsigned long long)bytenr);
5530                                 if (ret > 0)
5531                                         btrfs_print_leaf(extent_root,
5532                                                          path->nodes[0]);
5533                         }
5534                         if (ret < 0) {
5535                                 btrfs_abort_transaction(trans, extent_root, ret);
5536                                 goto out;
5537                         }
5538                         extent_slot = path->slots[0];
5539                 }
5540         } else if (ret == -ENOENT) {
5541                 btrfs_print_leaf(extent_root, path->nodes[0]);
5542                 WARN_ON(1);
5543                 btrfs_err(info,
5544                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5545                         (unsigned long long)bytenr,
5546                         (unsigned long long)parent,
5547                         (unsigned long long)root_objectid,
5548                         (unsigned long long)owner_objectid,
5549                         (unsigned long long)owner_offset);
5550         } else {
5551                 btrfs_abort_transaction(trans, extent_root, ret);
5552                 goto out;
5553         }
5554
5555         leaf = path->nodes[0];
5556         item_size = btrfs_item_size_nr(leaf, extent_slot);
5557 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5558         if (item_size < sizeof(*ei)) {
5559                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5560                 ret = convert_extent_item_v0(trans, extent_root, path,
5561                                              owner_objectid, 0);
5562                 if (ret < 0) {
5563                         btrfs_abort_transaction(trans, extent_root, ret);
5564                         goto out;
5565                 }
5566
5567                 btrfs_release_path(path);
5568                 path->leave_spinning = 1;
5569
5570                 key.objectid = bytenr;
5571                 key.type = BTRFS_EXTENT_ITEM_KEY;
5572                 key.offset = num_bytes;
5573
5574                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5575                                         -1, 1);
5576                 if (ret) {
5577                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5578                                 ret, (unsigned long long)bytenr);
5579                         btrfs_print_leaf(extent_root, path->nodes[0]);
5580                 }
5581                 if (ret < 0) {
5582                         btrfs_abort_transaction(trans, extent_root, ret);
5583                         goto out;
5584                 }
5585
5586                 extent_slot = path->slots[0];
5587                 leaf = path->nodes[0];
5588                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5589         }
5590 #endif
5591         BUG_ON(item_size < sizeof(*ei));
5592         ei = btrfs_item_ptr(leaf, extent_slot,
5593                             struct btrfs_extent_item);
5594         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5595             key.type == BTRFS_EXTENT_ITEM_KEY) {
5596                 struct btrfs_tree_block_info *bi;
5597                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5598                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5599                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5600         }
5601
5602         refs = btrfs_extent_refs(leaf, ei);
5603         if (refs < refs_to_drop) {
5604                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5605                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
5606                 ret = -EINVAL;
5607                 btrfs_abort_transaction(trans, extent_root, ret);
5608                 goto out;
5609         }
5610         refs -= refs_to_drop;
5611
5612         if (refs > 0) {
5613                 if (extent_op)
5614                         __run_delayed_extent_op(extent_op, leaf, ei);
5615                 /*
5616                  * In the case of inline back ref, reference count will
5617                  * be updated by remove_extent_backref
5618                  */
5619                 if (iref) {
5620                         BUG_ON(!found_extent);
5621                 } else {
5622                         btrfs_set_extent_refs(leaf, ei, refs);
5623                         btrfs_mark_buffer_dirty(leaf);
5624                 }
5625                 if (found_extent) {
5626                         ret = remove_extent_backref(trans, extent_root, path,
5627                                                     iref, refs_to_drop,
5628                                                     is_data);
5629                         if (ret) {
5630                                 btrfs_abort_transaction(trans, extent_root, ret);
5631                                 goto out;
5632                         }
5633                 }
5634         } else {
5635                 if (found_extent) {
5636                         BUG_ON(is_data && refs_to_drop !=
5637                                extent_data_ref_count(root, path, iref));
5638                         if (iref) {
5639                                 BUG_ON(path->slots[0] != extent_slot);
5640                         } else {
5641                                 BUG_ON(path->slots[0] != extent_slot + 1);
5642                                 path->slots[0] = extent_slot;
5643                                 num_to_del = 2;
5644                         }
5645                 }
5646
5647                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5648                                       num_to_del);
5649                 if (ret) {
5650                         btrfs_abort_transaction(trans, extent_root, ret);
5651                         goto out;
5652                 }
5653                 btrfs_release_path(path);
5654
5655                 if (is_data) {
5656                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5657                         if (ret) {
5658                                 btrfs_abort_transaction(trans, extent_root, ret);
5659                                 goto out;
5660                         }
5661                 }
5662
5663                 ret = update_block_group(root, bytenr, num_bytes, 0);
5664                 if (ret) {
5665                         btrfs_abort_transaction(trans, extent_root, ret);
5666                         goto out;
5667                 }
5668         }
5669 out:
5670         btrfs_free_path(path);
5671         return ret;
5672 }
5673
5674 /*
5675  * when we free a block, it is possible (and likely) that we free the last
5676  * delayed ref for that extent as well.  This searches the delayed ref tree for
5677  * a given extent, and if there are no other delayed refs to be processed, it
5678  * removes it from the tree.
5679  */
5680 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5681                                       struct btrfs_root *root, u64 bytenr)
5682 {
5683         struct btrfs_delayed_ref_head *head;
5684         struct btrfs_delayed_ref_root *delayed_refs;
5685         struct btrfs_delayed_ref_node *ref;
5686         struct rb_node *node;
5687         int ret = 0;
5688
5689         delayed_refs = &trans->transaction->delayed_refs;
5690         spin_lock(&delayed_refs->lock);
5691         head = btrfs_find_delayed_ref_head(trans, bytenr);
5692         if (!head)
5693                 goto out;
5694
5695         node = rb_prev(&head->node.rb_node);
5696         if (!node)
5697                 goto out;
5698
5699         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5700
5701         /* there are still entries for this ref, we can't drop it */
5702         if (ref->bytenr == bytenr)
5703                 goto out;
5704
5705         if (head->extent_op) {
5706                 if (!head->must_insert_reserved)
5707                         goto out;
5708                 btrfs_free_delayed_extent_op(head->extent_op);
5709                 head->extent_op = NULL;
5710         }
5711
5712         /*
5713          * waiting for the lock here would deadlock.  If someone else has it
5714          * locked, they are already in the process of dropping it anyway
5715          */
5716         if (!mutex_trylock(&head->mutex))
5717                 goto out;
5718
5719         /*
5720          * at this point we have a head with no other entries.  Go
5721          * ahead and process it.
5722          */
5723         head->node.in_tree = 0;
5724         rb_erase(&head->node.rb_node, &delayed_refs->root);
5725
5726         delayed_refs->num_entries--;
5727
5728         /*
5729          * we don't take a ref on the node because we're removing it from the
5730          * tree, so we just steal the ref the tree was holding.
5731          */
5732         delayed_refs->num_heads--;
5733         if (list_empty(&head->cluster))
5734                 delayed_refs->num_heads_ready--;
5735
5736         list_del_init(&head->cluster);
5737         spin_unlock(&delayed_refs->lock);
5738
5739         BUG_ON(head->extent_op);
5740         if (head->must_insert_reserved)
5741                 ret = 1;
5742
5743         mutex_unlock(&head->mutex);
5744         btrfs_put_delayed_ref(&head->node);
5745         return ret;
5746 out:
5747         spin_unlock(&delayed_refs->lock);
5748         return 0;
5749 }
5750
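/*
 * Free a tree block.  A delayed DROP ref is queued for every root except
 * the log trees.  When this was the last reference and the block was
 * allocated in the current transaction, its space may be reclaimed right
 * away: a block that was never written goes straight back to the free
 * space cache, while a written block has to stay pinned until the
 * transaction commits.
 */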
5751 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5752                            struct btrfs_root *root,
5753                            struct extent_buffer *buf,
5754                            u64 parent, int last_ref)
5755 {
5756         struct btrfs_block_group_cache *cache = NULL;
5757         int ret;
5758
5759         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5760                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5761                                         buf->start, buf->len,
5762                                         parent, root->root_key.objectid,
5763                                         btrfs_header_level(buf),
5764                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5765                 BUG_ON(ret); /* -ENOMEM */
5766         }
5767
5768         if (!last_ref)
5769                 return;
5770
5771         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5772
5773         if (btrfs_header_generation(buf) == trans->transid) {
5774                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5775                         ret = check_ref_cleanup(trans, root, buf->start);
5776                         if (!ret)
5777                                 goto out;
5778                 }
5779
5780                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5781                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5782                         goto out;
5783                 }
5784
5785                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5786
5787                 btrfs_add_free_space(cache, buf->start, buf->len);
5788                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5789         }
5790 out:
5791         /*
5792          * We are deleting the buffer, so clear the corrupt flag since it
5793          * no longer matters.
5794          */
5795         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5796         btrfs_put_block_group(cache);
5797 }
5798
5799 /* Can return -ENOMEM */
5800 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5801                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5802                       u64 owner, u64 offset, int for_cow)
5803 {
5804         int ret;
5805         struct btrfs_fs_info *fs_info = root->fs_info;
5806
5807         /*
5808          * tree log blocks never actually go into the extent allocation
5809          * tree, just update pinning info and exit early.
5810          */
5811         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5812                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5813                 /* unlocks the pinned mutex */
5814                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5815                 ret = 0;
5816         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5817                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5818                                         num_bytes,
5819                                         parent, root_objectid, (int)owner,
5820                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5821         } else {
5822                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5823                                                 num_bytes,
5824                                                 parent, root_objectid, owner,
5825                                                 offset, BTRFS_DROP_DELAYED_REF,
5826                                                 NULL, for_cow);
5827         }
5828         return ret;
5829 }
5830
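/*
 * Round @val up to the next multiple of the filesystem stripe size.  The
 * @cache and @num_bytes arguments are currently unused; the alignment
 * depends only on root->stripesize.
 */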
5831 static u64 stripe_align(struct btrfs_root *root,
5832                         struct btrfs_block_group_cache *cache,
5833                         u64 val, u64 num_bytes)
5834 {
5835         u64 ret = ALIGN(val, root->stripesize);
5836         return ret;
5837 }
5838
5839 /*
5840  * when we wait for progress in the block group caching, it's because
5841  * our allocation attempt failed at least once.  So, we must sleep
5842  * and let some progress happen before we try again.
5843  *
5844  * This function will sleep at least once waiting for new free space to
5845  * show up, and then it will check the block group free space numbers
5846  * for our min num_bytes.  Another option is to have it go ahead
5847  * and look in the rbtree for a free extent of a given size, but this
5848  * is a good start.
5849  */
5850 static noinline int
5851 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5852                                 u64 num_bytes)
5853 {
5854         struct btrfs_caching_control *caching_ctl;
5855
5856         caching_ctl = get_caching_control(cache);
5857         if (!caching_ctl)
5858                 return 0;
5859
5860         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5861                    (cache->free_space_ctl->free_space >= num_bytes));
5862
5863         put_caching_control(caching_ctl);
5864         return 0;
5865 }
5866
5867 static noinline int
5868 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5869 {
5870         struct btrfs_caching_control *caching_ctl;
5871
5872         caching_ctl = get_caching_control(cache);
5873         if (!caching_ctl)
5874                 return 0;
5875
5876         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5877
5878         put_caching_control(caching_ctl);
5879         return 0;
5880 }
5881
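/*
 * Map block group type flags to an index into space_info->block_groups[].
 * The flags are tested in a fixed priority order, so e.g.:
 *
 *	__get_raid_index(BTRFS_BLOCK_GROUP_DATA |
 *			 BTRFS_BLOCK_GROUP_RAID1) == BTRFS_RAID_RAID1
 */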
5882 int __get_raid_index(u64 flags)
5883 {
5884         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5885                 return BTRFS_RAID_RAID10;
5886         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5887                 return BTRFS_RAID_RAID1;
5888         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5889                 return BTRFS_RAID_DUP;
5890         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5891                 return BTRFS_RAID_RAID0;
5892         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5893                 return BTRFS_RAID_RAID5;
5894         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5895                 return BTRFS_RAID_RAID6;
5896
5897         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5898 }
5899
5900 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5901 {
5902         return __get_raid_index(cache->flags);
5903 }
5904
5905 enum btrfs_loop_type {
5906         LOOP_CACHING_NOWAIT = 0,
5907         LOOP_CACHING_WAIT = 1,
5908         LOOP_ALLOC_CHUNK = 2,
5909         LOOP_NO_EMPTY_SIZE = 3,
5910 };
5911
5912 /*
5913  * walks the btree of allocated extents and finds a hole of a given size.
5914  * The key ins is changed to record the hole:
5915  * ins->objectid == block start
5916  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5917  * ins->offset == number of blocks
5918  * Any available blocks before search_start are skipped.
5919  */
5920 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5921                                      struct btrfs_root *orig_root,
5922                                      u64 num_bytes, u64 empty_size,
5923                                      u64 hint_byte, struct btrfs_key *ins,
5924                                      u64 data)
5925 {
5926         int ret = 0;
5927         struct btrfs_root *root = orig_root->fs_info->extent_root;
5928         struct btrfs_free_cluster *last_ptr = NULL;
5929         struct btrfs_block_group_cache *block_group = NULL;
5930         struct btrfs_block_group_cache *used_block_group;
5931         u64 search_start = 0;
5932         int empty_cluster = 2 * 1024 * 1024;
5933         struct btrfs_space_info *space_info;
5934         int loop = 0;
5935         int index = __get_raid_index(data);
5936         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5937                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5938         bool found_uncached_bg = false;
5939         bool failed_cluster_refill = false;
5940         bool failed_alloc = false;
5941         bool use_cluster = true;
5942         bool have_caching_bg = false;
5943
5944         WARN_ON(num_bytes < root->sectorsize);
5945         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5946         ins->objectid = 0;
5947         ins->offset = 0;
5948
5949         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5950
5951         space_info = __find_space_info(root->fs_info, data);
5952         if (!space_info) {
5953                 btrfs_err(root->fs_info, "No space info for %llu", data);
5954                 return -ENOSPC;
5955         }
5956
5957         /*
5958          * If the space info is for both data and metadata it means we have a
5959          * small filesystem and we can't use the clustering stuff.
5960          */
5961         if (btrfs_mixed_space_info(space_info))
5962                 use_cluster = false;
5963
5964         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5965                 last_ptr = &root->fs_info->meta_alloc_cluster;
5966                 if (!btrfs_test_opt(root, SSD))
5967                         empty_cluster = 64 * 1024;
5968         }
5969
5970         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5971             btrfs_test_opt(root, SSD)) {
5972                 last_ptr = &root->fs_info->data_alloc_cluster;
5973         }
5974
5975         if (last_ptr) {
5976                 spin_lock(&last_ptr->lock);
5977                 if (last_ptr->block_group)
5978                         hint_byte = last_ptr->window_start;
5979                 spin_unlock(&last_ptr->lock);
5980         }
5981
5982         search_start = max(search_start, first_logical_byte(root, 0));
5983         search_start = max(search_start, hint_byte);
5984
5985         if (!last_ptr)
5986                 empty_cluster = 0;
5987
5988         if (search_start == hint_byte) {
5989                 block_group = btrfs_lookup_block_group(root->fs_info,
5990                                                        search_start);
5991                 used_block_group = block_group;
5992                 /*
5993                  * we don't want to use the block group if it doesn't match our
5994                  * allocation bits, or if it's not cached.
5995                  *
5996                  * However if we are re-searching with an ideal block group
5997                  * picked out then we don't care that the block group is cached.
5998                  */
5999                 if (block_group && block_group_bits(block_group, data) &&
6000                     block_group->cached != BTRFS_CACHE_NO) {
6001                         down_read(&space_info->groups_sem);
6002                         if (list_empty(&block_group->list) ||
6003                             block_group->ro) {
6004                                 /*
6005                                  * someone is removing this block group;
6006                                  * we can't jump to the have_block_group
6007                                  * label because our list pointers are
6008                                  * not valid
6009                                  */
6010                                 btrfs_put_block_group(block_group);
6011                                 up_read(&space_info->groups_sem);
6012                         } else {
6013                                 index = get_block_group_index(block_group);
6014                                 goto have_block_group;
6015                         }
6016                 } else if (block_group) {
6017                         btrfs_put_block_group(block_group);
6018                 }
6019         }
6020 search:
6021         have_caching_bg = false;
6022         down_read(&space_info->groups_sem);
6023         list_for_each_entry(block_group, &space_info->block_groups[index],
6024                             list) {
6025                 u64 offset;
6026                 int cached;
6027
6028                 used_block_group = block_group;
6029                 btrfs_get_block_group(block_group);
6030                 search_start = block_group->key.objectid;
6031
6032                 /*
6033                  * this can happen if we end up cycling through all the
6034                  * raid types, but we want to make sure we only allocate
6035                  * for the proper type.
6036                  */
6037                 if (!block_group_bits(block_group, data)) {
6038                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6039                                     BTRFS_BLOCK_GROUP_RAID1 |
6040                                     BTRFS_BLOCK_GROUP_RAID5 |
6041                                     BTRFS_BLOCK_GROUP_RAID6 |
6042                                     BTRFS_BLOCK_GROUP_RAID10;
6043
6044                         /*
6045                          * if they asked for extra copies and this block group
6046                          * doesn't provide them, bail.  This does allow us to
6047                          * fill raid0 from raid1.
6048                          */
6049                         if ((data & extra) && !(block_group->flags & extra))
6050                                 goto loop;
6051                 }
6052
6053 have_block_group:
6054                 cached = block_group_cache_done(block_group);
6055                 if (unlikely(!cached)) {
6056                         found_uncached_bg = true;
6057                         ret = cache_block_group(block_group, 0);
6058                         BUG_ON(ret < 0);
6059                         ret = 0;
6060                 }
6061
6062                 if (unlikely(block_group->ro))
6063                         goto loop;
6064
6065                 /*
6066                  * OK, we want to try and use the cluster allocator, so
6067                  * let's look there
6068                  */
6069                 if (last_ptr) {
6070                         unsigned long aligned_cluster;
6071                         /*
6072                          * the refill lock keeps out other
6073                          * people trying to start a new cluster
6074                          */
6075                         spin_lock(&last_ptr->refill_lock);
6076                         used_block_group = last_ptr->block_group;
6077                         if (used_block_group != block_group &&
6078                             (!used_block_group ||
6079                              used_block_group->ro ||
6080                              !block_group_bits(used_block_group, data))) {
6081                                 used_block_group = block_group;
6082                                 goto refill_cluster;
6083                         }
6084
6085                         if (used_block_group != block_group)
6086                                 btrfs_get_block_group(used_block_group);
6087
6088                         offset = btrfs_alloc_from_cluster(used_block_group,
6089                           last_ptr, num_bytes, used_block_group->key.objectid);
6090                         if (offset) {
6091                                 /* we have a block, we're done */
6092                                 spin_unlock(&last_ptr->refill_lock);
6093                                 trace_btrfs_reserve_extent_cluster(root,
6094                                         block_group, search_start, num_bytes);
6095                                 goto checks;
6096                         }
6097
6098                         WARN_ON(last_ptr->block_group != used_block_group);
6099                         if (used_block_group != block_group) {
6100                                 btrfs_put_block_group(used_block_group);
6101                                 used_block_group = block_group;
6102                         }
6103 refill_cluster:
6104                         BUG_ON(used_block_group != block_group);
6105                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6106                          * set up a new cluster, so let's just skip it
6107                          * and let the allocator find whatever block
6108                          * it can find.  If we reach this point, we
6109                          * will have tried the cluster allocator
6110                          * plenty of times and not have found
6111                          * anything, so we are likely way too
6112                          * fragmented for the clustering stuff to find
6113                          * anything.
6114                          *
6115                          * However, if the cluster is taken from the
6116                          * current block group, release the cluster
6117                          * first, so that we stand a better chance of
6118                          * succeeding in the unclustered
6119                          * allocation.  */
6120                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6121                             last_ptr->block_group != block_group) {
6122                                 spin_unlock(&last_ptr->refill_lock);
6123                                 goto unclustered_alloc;
6124                         }
6125
6126                         /*
6127                          * this cluster didn't work out, free it and
6128                          * start over
6129                          */
6130                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6131
6132                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6133                                 spin_unlock(&last_ptr->refill_lock);
6134                                 goto unclustered_alloc;
6135                         }
6136
6137                         aligned_cluster = max_t(unsigned long,
6138                                                 empty_cluster + empty_size,
6139                                               block_group->full_stripe_len);
6140
6141                         /* allocate a cluster in this block group */
6142                         ret = btrfs_find_space_cluster(trans, root,
6143                                                block_group, last_ptr,
6144                                                search_start, num_bytes,
6145                                                aligned_cluster);
6146                         if (ret == 0) {
6147                                 /*
6148                                  * now pull our allocation out of this
6149                                  * cluster
6150                                  */
6151                                 offset = btrfs_alloc_from_cluster(block_group,
6152                                                   last_ptr, num_bytes,
6153                                                   search_start);
6154                                 if (offset) {
6155                                         /* we found one, proceed */
6156                                         spin_unlock(&last_ptr->refill_lock);
6157                                         trace_btrfs_reserve_extent_cluster(root,
6158                                                 block_group, search_start,
6159                                                 num_bytes);
6160                                         goto checks;
6161                                 }
6162                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6163                                    && !failed_cluster_refill) {
6164                                 spin_unlock(&last_ptr->refill_lock);
6165
6166                                 failed_cluster_refill = true;
6167                                 wait_block_group_cache_progress(block_group,
6168                                        num_bytes + empty_cluster + empty_size);
6169                                 goto have_block_group;
6170                         }
6171
6172                         /*
6173                          * at this point we either didn't find a cluster
6174                          * or we weren't able to allocate a block from our
6175                          * cluster.  Free the cluster we've been trying
6176                          * to use, and go to the next block group
6177                          */
6178                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6179                         spin_unlock(&last_ptr->refill_lock);
6180                         goto loop;
6181                 }
6182
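                /*
                 * The cluster allocator failed or was skipped; fall back to
                 * a plain search of this block group's free space.  If the
                 * group is fully cached and clearly too small for the
                 * request, move on immediately.
                 */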
6183 unclustered_alloc:
6184                 spin_lock(&block_group->free_space_ctl->tree_lock);
6185                 if (cached &&
6186                     block_group->free_space_ctl->free_space <
6187                     num_bytes + empty_cluster + empty_size) {
6188                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6189                         goto loop;
6190                 }
6191                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6192
6193                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6194                                                     num_bytes, empty_size);
6195                 /*
6196                  * If we didn't find a chunk, and we haven't failed on this
6197                  * block group before, and this block group is in the middle of
6198                  * caching and we are ok with waiting, then go ahead and wait
6199                  * for progress to be made, and set failed_alloc to true.
6200                  *
6201                  * If failed_alloc is true then we've already waited on this
6202                  * block group once and should move on to the next block group.
6203                  */
6204                 if (!offset && !failed_alloc && !cached &&
6205                     loop > LOOP_CACHING_NOWAIT) {
6206                         wait_block_group_cache_progress(block_group,
6207                                                 num_bytes + empty_size);
6208                         failed_alloc = true;
6209                         goto have_block_group;
6210                 } else if (!offset) {
6211                         if (!cached)
6212                                 have_caching_bg = true;
6213                         goto loop;
6214                 }
6215 checks:
6216                 search_start = stripe_align(root, used_block_group,
6217                                             offset, num_bytes);
6218
6219                 /* move on to the next group */
6220                 if (search_start + num_bytes >
6221                     used_block_group->key.objectid + used_block_group->key.offset) {
6222                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6223                         goto loop;
6224                 }
6225
6226                 if (offset < search_start)
6227                         btrfs_add_free_space(used_block_group, offset,
6228                                              search_start - offset);
6229                 BUG_ON(offset > search_start);
6230
6231                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6232                                                   alloc_type);
6233                 if (ret == -EAGAIN) {
6234                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6235                         goto loop;
6236                 }
6237
6238                 /* we are all good, lets return */
6239                 ins->objectid = search_start;
6240                 ins->offset = num_bytes;
6241
6242                 trace_btrfs_reserve_extent(orig_root, block_group,
6243                                            search_start, num_bytes);
6244                 if (used_block_group != block_group)
6245                         btrfs_put_block_group(used_block_group);
6246                 btrfs_put_block_group(block_group);
6247                 break;
6248 loop:
6249                 failed_cluster_refill = false;
6250                 failed_alloc = false;
6251                 BUG_ON(index != get_block_group_index(block_group));
6252                 if (used_block_group != block_group)
6253                         btrfs_put_block_group(used_block_group);
6254                 btrfs_put_block_group(block_group);
6255         }
6256         up_read(&space_info->groups_sem);
6257
6258         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6259                 goto search;
6260
6261         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6262                 goto search;
6263
6264         /*
6265          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6266          *                      caching kthreads as we move along
6267          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6268          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6269          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6270          *                      again
6271          */
6272         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6273                 index = 0;
6274                 loop++;
6275                 if (loop == LOOP_ALLOC_CHUNK) {
6276                         ret = do_chunk_alloc(trans, root, data,
6277                                              CHUNK_ALLOC_FORCE);
6278                         /*
6279                          * Do not bail out on ENOSPC since we
6280                          * can still make progress in the later loop stages.
6281                          */
6282                         if (ret < 0 && ret != -ENOSPC) {
6283                                 btrfs_abort_transaction(trans,
6284                                                         root, ret);
6285                                 goto out;
6286                         }
6287                 }
6288
6289                 if (loop == LOOP_NO_EMPTY_SIZE) {
6290                         empty_size = 0;
6291                         empty_cluster = 0;
6292                 }
6293
6294                 goto search;
6295         } else if (!ins->objectid) {
6296                 ret = -ENOSPC;
6297         } else if (ins->objectid) {
6298                 ret = 0;
6299         }
6300 out:
6301
6302         return ret;
6303 }
6304
6305 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6306                             int dump_block_groups)
6307 {
6308         struct btrfs_block_group_cache *cache;
6309         int index = 0;
6310
6311         spin_lock(&info->lock);
6312         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6313                (unsigned long long)info->flags,
6314                (unsigned long long)(info->total_bytes - info->bytes_used -
6315                                     info->bytes_pinned - info->bytes_reserved -
6316                                     info->bytes_readonly),
6317                (info->full) ? "" : "not ");
6318         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6319                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6320                (unsigned long long)info->total_bytes,
6321                (unsigned long long)info->bytes_used,
6322                (unsigned long long)info->bytes_pinned,
6323                (unsigned long long)info->bytes_reserved,
6324                (unsigned long long)info->bytes_may_use,
6325                (unsigned long long)info->bytes_readonly);
6326         spin_unlock(&info->lock);
6327
6328         if (!dump_block_groups)
6329                 return;
6330
6331         down_read(&info->groups_sem);
6332 again:
6333         list_for_each_entry(cache, &info->block_groups[index], list) {
6334                 spin_lock(&cache->lock);
6335                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6336                        (unsigned long long)cache->key.objectid,
6337                        (unsigned long long)cache->key.offset,
6338                        (unsigned long long)btrfs_block_group_used(&cache->item),
6339                        (unsigned long long)cache->pinned,
6340                        (unsigned long long)cache->reserved,
6341                        cache->ro ? "[readonly]" : "");
6342                 btrfs_dump_free_space(cache, bytes);
6343                 spin_unlock(&cache->lock);
6344         }
6345         if (++index < BTRFS_NR_RAID_TYPES)
6346                 goto again;
6347         up_read(&info->groups_sem);
6348 }
6349
6350 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6351                          struct btrfs_root *root,
6352                          u64 num_bytes, u64 min_alloc_size,
6353                          u64 empty_size, u64 hint_byte,
6354                          struct btrfs_key *ins, u64 data)
6355 {
6356         bool final_tried = false;
6357         int ret;
6358
6359         data = btrfs_get_alloc_profile(root, data);
6360 again:
6361         WARN_ON(num_bytes < root->sectorsize);
6362         ret = find_free_extent(trans, root, num_bytes, empty_size,
6363                                hint_byte, ins, data);
6364
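        /*
         * On ENOSPC, retry with progressively smaller sizes: num_bytes is
         * halved (rounded down to a sector boundary) on each pass until it
         * reaches min_alloc_size, which gets one final attempt.  E.g. with
         * num_bytes of 8M and a min_alloc_size of 1M the attempts are
         * 8M, 4M, 2M and finally 1M.
         */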
6365         if (ret == -ENOSPC) {
6366                 if (!final_tried) {
6367                         num_bytes = num_bytes >> 1;
6368                         num_bytes = round_down(num_bytes, root->sectorsize);
6369                         num_bytes = max(num_bytes, min_alloc_size);
6370                         if (num_bytes == min_alloc_size)
6371                                 final_tried = true;
6372                         goto again;
6373                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6374                         struct btrfs_space_info *sinfo;
6375
6376                         sinfo = __find_space_info(root->fs_info, data);
6377                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6378                                 (unsigned long long)data,
6379                                 (unsigned long long)num_bytes);
6380                         if (sinfo)
6381                                 dump_space_info(sinfo, num_bytes, 1);
6382                 }
6383         }
6384
6385         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6386
6387         return ret;
6388 }
6389
6390 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6391                                         u64 start, u64 len, int pin)
6392 {
6393         struct btrfs_block_group_cache *cache;
6394         int ret = 0;
6395
6396         cache = btrfs_lookup_block_group(root->fs_info, start);
6397         if (!cache) {
6398                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6399                         (unsigned long long)start);
6400                 return -ENOSPC;
6401         }
6402
6403         if (btrfs_test_opt(root, DISCARD))
6404                 ret = btrfs_discard_extent(root, start, len, NULL);
6405
6406         if (pin)
6407                 pin_down_extent(root, cache, start, len, 1);
6408         else {
6409                 btrfs_add_free_space(cache, start, len);
6410                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6411         }
6412         btrfs_put_block_group(cache);
6413
6414         trace_btrfs_reserved_extent_free(root, start, len);
6415
6416         return ret;
6417 }
6418
6419 int btrfs_free_reserved_extent(struct btrfs_root *root,
6420                                         u64 start, u64 len)
6421 {
6422         return __btrfs_free_reserved_extent(root, start, len, 0);
6423 }
6424
6425 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6426                                        u64 start, u64 len)
6427 {
6428         return __btrfs_free_reserved_extent(root, start, len, 1);
6429 }
6430
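/*
 * Insert the extent item for a newly allocated data extent.  The item is
 * written with a single inline backref placed directly after the
 * btrfs_extent_item:
 *
 *	[btrfs_extent_item][inline ref type][shared or full data backref]
 *
 * A shared data ref keyed on @parent is used when the extent belongs to a
 * shared subtree; otherwise a full backref recording root, owner and
 * offset is used.
 */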
6431 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6432                                       struct btrfs_root *root,
6433                                       u64 parent, u64 root_objectid,
6434                                       u64 flags, u64 owner, u64 offset,
6435                                       struct btrfs_key *ins, int ref_mod)
6436 {
6437         int ret;
6438         struct btrfs_fs_info *fs_info = root->fs_info;
6439         struct btrfs_extent_item *extent_item;
6440         struct btrfs_extent_inline_ref *iref;
6441         struct btrfs_path *path;
6442         struct extent_buffer *leaf;
6443         int type;
6444         u32 size;
6445
6446         if (parent > 0)
6447                 type = BTRFS_SHARED_DATA_REF_KEY;
6448         else
6449                 type = BTRFS_EXTENT_DATA_REF_KEY;
6450
6451         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6452
6453         path = btrfs_alloc_path();
6454         if (!path)
6455                 return -ENOMEM;
6456
6457         path->leave_spinning = 1;
6458         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6459                                       ins, size);
6460         if (ret) {
6461                 btrfs_free_path(path);
6462                 return ret;
6463         }
6464
6465         leaf = path->nodes[0];
6466         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6467                                      struct btrfs_extent_item);
6468         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6469         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6470         btrfs_set_extent_flags(leaf, extent_item,
6471                                flags | BTRFS_EXTENT_FLAG_DATA);
6472
6473         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6474         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6475         if (parent > 0) {
6476                 struct btrfs_shared_data_ref *ref;
6477                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6478                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6479                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6480         } else {
6481                 struct btrfs_extent_data_ref *ref;
6482                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6483                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6484                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6485                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6486                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6487         }
6488
6489         btrfs_mark_buffer_dirty(path->nodes[0]);
6490         btrfs_free_path(path);
6491
6492         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6493         if (ret) { /* -ENOENT, logic error */
6494                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6495                         (unsigned long long)ins->objectid,
6496                         (unsigned long long)ins->offset);
6497                 BUG();
6498         }
6499         return ret;
6500 }
6501
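/*
 * Insert the extent item for a newly allocated tree block.  With the
 * SKINNY_METADATA incompat flag set, the caller's key is expected to be
 * (bytenr, METADATA_ITEM, level) and no btrfs_tree_block_info is stored;
 * otherwise the tree_block_info (first key and level) precedes the inline
 * backref.
 */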
6502 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6503                                      struct btrfs_root *root,
6504                                      u64 parent, u64 root_objectid,
6505                                      u64 flags, struct btrfs_disk_key *key,
6506                                      int level, struct btrfs_key *ins)
6507 {
6508         int ret;
6509         struct btrfs_fs_info *fs_info = root->fs_info;
6510         struct btrfs_extent_item *extent_item;
6511         struct btrfs_tree_block_info *block_info;
6512         struct btrfs_extent_inline_ref *iref;
6513         struct btrfs_path *path;
6514         struct extent_buffer *leaf;
6515         u32 size = sizeof(*extent_item) + sizeof(*iref);
6516         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6517                                                  SKINNY_METADATA);
6518
6519         if (!skinny_metadata)
6520                 size += sizeof(*block_info);
6521
6522         path = btrfs_alloc_path();
6523         if (!path)
6524                 return -ENOMEM;
6525
6526         path->leave_spinning = 1;
6527         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6528                                       ins, size);
6529         if (ret) {
6530                 btrfs_free_path(path);
6531                 return ret;
6532         }
6533
6534         leaf = path->nodes[0];
6535         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6536                                      struct btrfs_extent_item);
6537         btrfs_set_extent_refs(leaf, extent_item, 1);
6538         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6539         btrfs_set_extent_flags(leaf, extent_item,
6540                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6541
6542         if (skinny_metadata) {
6543                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6544         } else {
6545                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6546                 btrfs_set_tree_block_key(leaf, block_info, key);
6547                 btrfs_set_tree_block_level(leaf, block_info, level);
6548                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6549         }
6550
6551         if (parent > 0) {
6552                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6553                 btrfs_set_extent_inline_ref_type(leaf, iref,
6554                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6555                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6556         } else {
6557                 btrfs_set_extent_inline_ref_type(leaf, iref,
6558                                                  BTRFS_TREE_BLOCK_REF_KEY);
6559                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6560         }
6561
6562         btrfs_mark_buffer_dirty(leaf);
6563         btrfs_free_path(path);
6564
6565         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6566         if (ret) { /* -ENOENT, logic error */
6567                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6568                         (unsigned long long)ins->objectid,
6569                         (unsigned long long)ins->offset);
6570                 BUG();
6571         }
6572         return ret;
6573 }
6574
6575 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6576                                      struct btrfs_root *root,
6577                                      u64 root_objectid, u64 owner,
6578                                      u64 offset, struct btrfs_key *ins)
6579 {
6580         int ret;
6581
6582         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6583
6584         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6585                                          ins->offset, 0,
6586                                          root_objectid, owner, offset,
6587                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6588         return ret;
6589 }
6590
6591 /*
6592  * this is used by the tree logging recovery code.  It records that
6593  * an extent has been allocated and makes sure to clear the free
6594  * space cache bits as well
6595  */
6596 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6597                                    struct btrfs_root *root,
6598                                    u64 root_objectid, u64 owner, u64 offset,
6599                                    struct btrfs_key *ins)
6600 {
6601         int ret;
6602         struct btrfs_block_group_cache *block_group;
6603         struct btrfs_caching_control *caching_ctl;
6604         u64 start = ins->objectid;
6605         u64 num_bytes = ins->offset;
6606
6607         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6608         cache_block_group(block_group, 0);
6609         caching_ctl = get_caching_control(block_group);
6610
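        /*
         * A missing caching control means the group is fully cached, so the
         * range can be removed from the free space cache directly.  While
         * caching is still running only the part below caching_ctl->progress
         * is in the cache; anything above it must be recorded as an excluded
         * extent so the caching thread does not re-add it as free space.
         */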
6611         if (!caching_ctl) {
6612                 BUG_ON(!block_group_cache_done(block_group));
6613                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6614                 BUG_ON(ret); /* -ENOMEM */
6615         } else {
6616                 mutex_lock(&caching_ctl->mutex);
6617
6618                 if (start >= caching_ctl->progress) {
6619                         ret = add_excluded_extent(root, start, num_bytes);
6620                         BUG_ON(ret); /* -ENOMEM */
6621                 } else if (start + num_bytes <= caching_ctl->progress) {
6622                         ret = btrfs_remove_free_space(block_group,
6623                                                       start, num_bytes);
6624                         BUG_ON(ret); /* -ENOMEM */
6625                 } else {
6626                         num_bytes = caching_ctl->progress - start;
6627                         ret = btrfs_remove_free_space(block_group,
6628                                                       start, num_bytes);
6629                         BUG_ON(ret); /* -ENOMEM */
6630
6631                         start = caching_ctl->progress;
6632                         num_bytes = ins->objectid + ins->offset -
6633                                     caching_ctl->progress;
6634                         ret = add_excluded_extent(root, start, num_bytes);
6635                         BUG_ON(ret); /* -ENOMEM */
6636                 }
6637
6638                 mutex_unlock(&caching_ctl->mutex);
6639                 put_caching_control(caching_ctl);
6640         }
6641
6642         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6643                                           RESERVE_ALLOC_NO_ACCOUNT);
6644         BUG_ON(ret); /* logic error */
6645         btrfs_put_block_group(block_group);
6646         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6647                                          0, owner, offset, ins, 1);
6648         return ret;
6649 }
6650
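/*
 * Illustrative note (not from the original source): the caching branch
 * in btrfs_alloc_logged_file_extent() above splits the logged range
 * [start, start + num_bytes) at caching_ctl->progress.  Whatever the
 * caching thread has already scanned must be removed from the free
 * space cache; the unscanned remainder is recorded as an excluded
 * extent so the scanner will skip it.  A compact sketch of the same
 * three cases, under the same caching_ctl->mutex:
 *
 *	u64 end = start + num_bytes;
 *	u64 split = clamp(caching_ctl->progress, start, end);
 *
 *	if (split > start)	// part already scanned by the cacher
 *		btrfs_remove_free_space(block_group, start, split - start);
 *	if (split < end)	// part not scanned yet
 *		add_excluded_extent(root, split, end - split);
 */
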
6651 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6652                                             struct btrfs_root *root,
6653                                             u64 bytenr, u32 blocksize,
6654                                             int level)
6655 {
6656         struct extent_buffer *buf;
6657
6658         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6659         if (!buf)
6660                 return ERR_PTR(-ENOMEM);
6661         btrfs_set_header_generation(buf, trans->transid);
6662         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6663         btrfs_tree_lock(buf);
6664         clean_tree_block(trans, root, buf);
6665         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6666
6667         btrfs_set_lock_blocking(buf);
6668         btrfs_set_buffer_uptodate(buf);
6669
6670         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6671                 /*
6672                  * we allow two log transactions at a time; use different
6673                  * EXTENT bits to differentiate dirty pages.
6674                  */
6675                 if (root->log_transid % 2 == 0)
6676                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6677                                         buf->start + buf->len - 1, GFP_NOFS);
6678                 else
6679                         set_extent_new(&root->dirty_log_pages, buf->start,
6680                                         buf->start + buf->len - 1, GFP_NOFS);
6681         } else {
6682                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6683                          buf->start + buf->len - 1, GFP_NOFS);
6684         }
6685         trans->blocks_used++;
6686         /* this returns a buffer locked for blocking */
6687         return buf;
6688 }
6689
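/*
 * Illustrative note (not from the original source): two log
 * transactions may run at once, so the parity of root->log_transid
 * chooses which extent bit tags this buffer's range, e.g.
 *
 *	log_transid 8 (even) -> EXTENT_DIRTY in root->dirty_log_pages
 *	log_transid 9 (odd)  -> EXTENT_NEW   in root->dirty_log_pages
 *
 * so each log commit can find and clear only its own buffers.
 */
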
6690 static struct btrfs_block_rsv *
6691 use_block_rsv(struct btrfs_trans_handle *trans,
6692               struct btrfs_root *root, u32 blocksize)
6693 {
6694         struct btrfs_block_rsv *block_rsv;
6695         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6696         int ret;
6697
6698         block_rsv = get_block_rsv(trans, root);
6699
6700         if (block_rsv->size == 0) {
6701                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6702                                              BTRFS_RESERVE_NO_FLUSH);
6703                 /*
6704                  * If we couldn't reserve metadata bytes try and use some from
6705                  * the global reserve.
6706                  */
6707                 if (ret && block_rsv != global_rsv) {
6708                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6709                         if (!ret)
6710                                 return global_rsv;
6711                         return ERR_PTR(ret);
6712                 } else if (ret) {
6713                         return ERR_PTR(ret);
6714                 }
6715                 return block_rsv;
6716         }
6717
6718         ret = block_rsv_use_bytes(block_rsv, blocksize);
6719         if (!ret)
6720                 return block_rsv;
6721         if (ret && !block_rsv->failfast) {
6722                 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6723                         static DEFINE_RATELIMIT_STATE(_rs,
6724                                         DEFAULT_RATELIMIT_INTERVAL * 10,
6725                                         /*DEFAULT_RATELIMIT_BURST*/ 1);
6726                         if (__ratelimit(&_rs))
6727                                 WARN(1, KERN_DEBUG
6728                                         "btrfs: block rsv returned %d\n", ret);
6729                 }
6730                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6731                                              BTRFS_RESERVE_NO_FLUSH);
6732                 if (!ret) {
6733                         return block_rsv;
6734                 } else if (ret && block_rsv != global_rsv) {
6735                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6736                         if (!ret)
6737                                 return global_rsv;
6738                 }
6739         }
6740
6741         return ERR_PTR(-ENOSPC);
6742 }
6743
6744 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6745                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6746 {
6747         block_rsv_add_bytes(block_rsv, blocksize, 0);
6748         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6749 }
6750
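/*
 * Caller-side sketch (illustrative, not from the original source):
 * use_block_rsv() tries the root's own rsv first, retries the
 * reservation, and falls back to the global reserve before giving up
 * with -ENOSPC.  The expected calling pattern, as followed by
 * btrfs_alloc_free_block() below, is:
 *
 *	block_rsv = use_block_rsv(trans, root, blocksize);
 *	if (IS_ERR(block_rsv))
 *		return ERR_CAST(block_rsv);
 *	ret = btrfs_reserve_extent(...);
 *	if (ret)	// give the bytes back on failure
 *		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
 */
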
6751 /*
6752  * finds a free extent and does all the dirty work required for allocation.
6753  * returns the key for the extent through ins, and a tree buffer for
6754  * the first block of the extent as the return value.
6755  *
6756  * returns the tree buffer or an error pointer on failure.
6757  */
6758 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6759                                         struct btrfs_root *root, u32 blocksize,
6760                                         u64 parent, u64 root_objectid,
6761                                         struct btrfs_disk_key *key, int level,
6762                                         u64 hint, u64 empty_size)
6763 {
6764         struct btrfs_key ins;
6765         struct btrfs_block_rsv *block_rsv;
6766         struct extent_buffer *buf;
6767         u64 flags = 0;
6768         int ret;
6769         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6770                                                  SKINNY_METADATA);
6771
6772         block_rsv = use_block_rsv(trans, root, blocksize);
6773         if (IS_ERR(block_rsv))
6774                 return ERR_CAST(block_rsv);
6775
6776         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6777                                    empty_size, hint, &ins, 0);
6778         if (ret) {
6779                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6780                 return ERR_PTR(ret);
6781         }
6782
6783         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6784                                     blocksize, level);
6785         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6786
6787         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6788                 if (parent == 0)
6789                         parent = ins.objectid;
6790                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6791         } else
6792                 BUG_ON(parent > 0);
6793
6794         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6795                 struct btrfs_delayed_extent_op *extent_op;
6796                 extent_op = btrfs_alloc_delayed_extent_op();
6797                 BUG_ON(!extent_op); /* -ENOMEM */
6798                 if (key)
6799                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6800                 else
6801                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6802                 extent_op->flags_to_set = flags;
6803                 if (skinny_metadata)
6804                         extent_op->update_key = 0;
6805                 else
6806                         extent_op->update_key = 1;
6807                 extent_op->update_flags = 1;
6808                 extent_op->is_data = 0;
6809
6810                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6811                                         ins.objectid,
6812                                         ins.offset, parent, root_objectid,
6813                                         level, BTRFS_ADD_DELAYED_EXTENT,
6814                                         extent_op, 0);
6815                 BUG_ON(ret); /* -ENOMEM */
6816         }
6817         return buf;
6818 }
6819
6820 struct walk_control {
6821         u64 refs[BTRFS_MAX_LEVEL];
6822         u64 flags[BTRFS_MAX_LEVEL];
6823         struct btrfs_key update_progress;
6824         int stage;
6825         int level;
6826         int shared_level;
6827         int update_ref;
6828         int keep_locks;
6829         int reada_slot;
6830         int reada_count;
6831         int for_reloc;
6832 };
6833
6834 #define DROP_REFERENCE  1
6835 #define UPDATE_BACKREF  2
6836
6837 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6838                                      struct btrfs_root *root,
6839                                      struct walk_control *wc,
6840                                      struct btrfs_path *path)
6841 {
6842         u64 bytenr;
6843         u64 generation;
6844         u64 refs;
6845         u64 flags;
6846         u32 nritems;
6847         u32 blocksize;
6848         struct btrfs_key key;
6849         struct extent_buffer *eb;
6850         int ret;
6851         int slot;
6852         int nread = 0;
6853
6854         if (path->slots[wc->level] < wc->reada_slot) {
6855                 wc->reada_count = wc->reada_count * 2 / 3;
6856                 wc->reada_count = max(wc->reada_count, 2);
6857         } else {
6858                 wc->reada_count = wc->reada_count * 3 / 2;
6859                 wc->reada_count = min_t(int, wc->reada_count,
6860                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6861         }
6862
6863         eb = path->nodes[wc->level];
6864         nritems = btrfs_header_nritems(eb);
6865         blocksize = btrfs_level_size(root, wc->level - 1);
6866
6867         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6868                 if (nread >= wc->reada_count)
6869                         break;
6870
6871                 cond_resched();
6872                 bytenr = btrfs_node_blockptr(eb, slot);
6873                 generation = btrfs_node_ptr_generation(eb, slot);
6874
6875                 if (slot == path->slots[wc->level])
6876                         goto reada;
6877
6878                 if (wc->stage == UPDATE_BACKREF &&
6879                     generation <= root->root_key.offset)
6880                         continue;
6881
6882                 /* We don't lock the tree block, it's OK to be racy here */
6883                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
6884                                                wc->level - 1, 1, &refs,
6885                                                &flags);
6886                 /* We don't care about errors in readahead. */
6887                 if (ret < 0)
6888                         continue;
6889                 BUG_ON(refs == 0);
6890
6891                 if (wc->stage == DROP_REFERENCE) {
6892                         if (refs == 1)
6893                                 goto reada;
6894
6895                         if (wc->level == 1 &&
6896                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6897                                 continue;
6898                         if (!wc->update_ref ||
6899                             generation <= root->root_key.offset)
6900                                 continue;
6901                         btrfs_node_key_to_cpu(eb, &key, slot);
6902                         ret = btrfs_comp_cpu_keys(&key,
6903                                                   &wc->update_progress);
6904                         if (ret < 0)
6905                                 continue;
6906                 } else {
6907                         if (wc->level == 1 &&
6908                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6909                                 continue;
6910                 }
6911 reada:
6912                 ret = readahead_tree_block(root, bytenr, blocksize,
6913                                            generation);
6914                 if (ret)
6915                         break;
6916                 nread++;
6917         }
6918         wc->reada_slot = slot;
6919 }
6920
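/*
 * Worked example (illustrative, not from the original source): the
 * readahead window in reada_walk_down() adapts to how fast the walk
 * consumes it.  If the walker has not yet reached the previous window
 * end (slot < reada_slot) the window shrinks to 2/3, otherwise it
 * grows to 3/2, clamped between 2 and the pointers per node:
 *
 *	reada_count = 96, still behind -> 96 * 2 / 3 = 64
 *	reada_count = 96, caught up    -> 96 * 3 / 2 = 144 (then clamped)
 */
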
6921 /*
6922  * helper to process tree block while walking down the tree.
6923  *
6924  * when wc->stage == UPDATE_BACKREF, this function updates
6925  * back refs for pointers in the block.
6926  *
6927  * NOTE: return value 1 means we should stop walking down.
6928  */
6929 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6930                                    struct btrfs_root *root,
6931                                    struct btrfs_path *path,
6932                                    struct walk_control *wc, int lookup_info)
6933 {
6934         int level = wc->level;
6935         struct extent_buffer *eb = path->nodes[level];
6936         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6937         int ret;
6938
6939         if (wc->stage == UPDATE_BACKREF &&
6940             btrfs_header_owner(eb) != root->root_key.objectid)
6941                 return 1;
6942
6943         /*
6944          * when the reference count of a tree block is 1, it won't increase
6945          * again. once the full backref flag is set, we never clear it.
6946          */
6947         if (lookup_info &&
6948             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6949              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6950                 BUG_ON(!path->locks[level]);
6951                 ret = btrfs_lookup_extent_info(trans, root,
6952                                                eb->start, level, 1,
6953                                                &wc->refs[level],
6954                                                &wc->flags[level]);
6955                 BUG_ON(ret == -ENOMEM);
6956                 if (ret)
6957                         return ret;
6958                 BUG_ON(wc->refs[level] == 0);
6959         }
6960
6961         if (wc->stage == DROP_REFERENCE) {
6962                 if (wc->refs[level] > 1)
6963                         return 1;
6964
6965                 if (path->locks[level] && !wc->keep_locks) {
6966                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6967                         path->locks[level] = 0;
6968                 }
6969                 return 0;
6970         }
6971
6972         /* wc->stage == UPDATE_BACKREF */
6973         if (!(wc->flags[level] & flag)) {
6974                 BUG_ON(!path->locks[level]);
6975                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6976                 BUG_ON(ret); /* -ENOMEM */
6977                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6978                 BUG_ON(ret); /* -ENOMEM */
6979                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6980                                                   eb->len, flag, 0);
6981                 BUG_ON(ret); /* -ENOMEM */
6982                 wc->flags[level] |= flag;
6983         }
6984
6985         /*
6986          * the block is shared by multiple trees, so it's not good to
6987          * keep the tree lock
6988          */
6989         if (path->locks[level] && level > 0) {
6990                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6991                 path->locks[level] = 0;
6992         }
6993         return 0;
6994 }
6995
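/*
 * Illustrative note (not from the original source): the UPDATE_BACKREF
 * branch in walk_down_proc() converts a block to full backrefs with an
 * ordered three-step update:
 *
 *	btrfs_inc_ref(..., full_backref = 1)  add shared, parent-keyed refs
 *	btrfs_dec_ref(..., full_backref = 0)  drop the owner-keyed refs
 *	btrfs_set_disk_extent_flags(...)      persist FULL_BACKREF on disk
 *
 * so at no point do the referenced extents lose their last reference.
 */
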
6996 /*
6997  * helper to process tree block pointer.
6998  *
6999  * when wc->stage == DROP_REFERENCE, this function checks
7000  * reference count of the block pointed to. if the block
7001  * is shared and we need to update back refs for the subtree
7002  * rooted at the block, this function changes wc->stage to
7003  * UPDATE_BACKREF. if the block is shared and there is no
7004  * need to update back refs, this function drops the reference
7005  * to the block.
7006  *
7007  * NOTE: return value 1 means we should stop walking down.
7008  */
7009 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7010                                  struct btrfs_root *root,
7011                                  struct btrfs_path *path,
7012                                  struct walk_control *wc, int *lookup_info)
7013 {
7014         u64 bytenr;
7015         u64 generation;
7016         u64 parent;
7017         u32 blocksize;
7018         struct btrfs_key key;
7019         struct extent_buffer *next;
7020         int level = wc->level;
7021         int reada = 0;
7022         int ret = 0;
7023
7024         generation = btrfs_node_ptr_generation(path->nodes[level],
7025                                                path->slots[level]);
7026         /*
7027          * if the lower level block was created before the snapshot
7028          * was created, we know there is no need to update back refs
7029          * for the subtree
7030          */
7031         if (wc->stage == UPDATE_BACKREF &&
7032             generation <= root->root_key.offset) {
7033                 *lookup_info = 1;
7034                 return 1;
7035         }
7036
7037         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7038         blocksize = btrfs_level_size(root, level - 1);
7039
7040         next = btrfs_find_tree_block(root, bytenr, blocksize);
7041         if (!next) {
7042                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7043                 if (!next)
7044                         return -ENOMEM;
7045                 reada = 1;
7046         }
7047         btrfs_tree_lock(next);
7048         btrfs_set_lock_blocking(next);
7049
7050         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7051                                        &wc->refs[level - 1],
7052                                        &wc->flags[level - 1]);
7053         if (ret < 0) {
7054                 btrfs_tree_unlock(next);
7055                 return ret;
7056         }
7057
7058         if (unlikely(wc->refs[level - 1] == 0)) {
7059                 btrfs_err(root->fs_info, "Missing references.");
7060                 BUG();
7061         }
7062         *lookup_info = 0;
7063
7064         if (wc->stage == DROP_REFERENCE) {
7065                 if (wc->refs[level - 1] > 1) {
7066                         if (level == 1 &&
7067                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7068                                 goto skip;
7069
7070                         if (!wc->update_ref ||
7071                             generation <= root->root_key.offset)
7072                                 goto skip;
7073
7074                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7075                                               path->slots[level]);
7076                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7077                         if (ret < 0)
7078                                 goto skip;
7079
7080                         wc->stage = UPDATE_BACKREF;
7081                         wc->shared_level = level - 1;
7082                 }
7083         } else {
7084                 if (level == 1 &&
7085                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7086                         goto skip;
7087         }
7088
7089         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7090                 btrfs_tree_unlock(next);
7091                 free_extent_buffer(next);
7092                 next = NULL;
7093                 *lookup_info = 1;
7094         }
7095
7096         if (!next) {
7097                 if (reada && level == 1)
7098                         reada_walk_down(trans, root, wc, path);
7099                 next = read_tree_block(root, bytenr, blocksize, generation);
7100                 if (!next || !extent_buffer_uptodate(next)) {
7101                         free_extent_buffer(next);
7102                         return -EIO;
7103                 }
7104                 btrfs_tree_lock(next);
7105                 btrfs_set_lock_blocking(next);
7106         }
7107
7108         level--;
7109         BUG_ON(level != btrfs_header_level(next));
7110         path->nodes[level] = next;
7111         path->slots[level] = 0;
7112         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7113         wc->level = level;
7114         if (wc->level == 1)
7115                 wc->reada_slot = 0;
7116         return 0;
7117 skip:
7118         wc->refs[level - 1] = 0;
7119         wc->flags[level - 1] = 0;
7120         if (wc->stage == DROP_REFERENCE) {
7121                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7122                         parent = path->nodes[level]->start;
7123                 } else {
7124                         BUG_ON(root->root_key.objectid !=
7125                                btrfs_header_owner(path->nodes[level]));
7126                         parent = 0;
7127                 }
7128
7129                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7130                                 root->root_key.objectid, level - 1, 0, 0);
7131                 BUG_ON(ret); /* -ENOMEM */
7132         }
7133         btrfs_tree_unlock(next);
7134         free_extent_buffer(next);
7135         *lookup_info = 1;
7136         return 1;
7137 }
7138
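/*
 * Illustrative note (not from the original source): on the skip path
 * the child stays shared, so do_walk_down() only drops this tree's
 * single reference.  The parent argument to btrfs_free_extent()
 * mirrors the backref type of the pointer being dropped:
 *
 *	FULL_BACKREF set on this node -> parent = path->nodes[level]->start
 *	owner-keyed reference         -> parent = 0
 *
 * the same choice walk_up_proc() makes when freeing a whole block.
 */
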
7139 /*
7140  * helper to process tree block while walking up the tree.
7141  *
7142  * when wc->stage == DROP_REFERENCE, this function drops
7143  * reference count on the block.
7144  *
7145  * when wc->stage == UPDATE_BACKREF, this function changes
7146  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7147  * to UPDATE_BACKREF previously while processing the block.
7148  *
7149  * NOTE: return value 1 means we should stop walking up.
7150  */
7151 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7152                                  struct btrfs_root *root,
7153                                  struct btrfs_path *path,
7154                                  struct walk_control *wc)
7155 {
7156         int ret;
7157         int level = wc->level;
7158         struct extent_buffer *eb = path->nodes[level];
7159         u64 parent = 0;
7160
7161         if (wc->stage == UPDATE_BACKREF) {
7162                 BUG_ON(wc->shared_level < level);
7163                 if (level < wc->shared_level)
7164                         goto out;
7165
7166                 ret = find_next_key(path, level + 1, &wc->update_progress);
7167                 if (ret > 0)
7168                         wc->update_ref = 0;
7169
7170                 wc->stage = DROP_REFERENCE;
7171                 wc->shared_level = -1;
7172                 path->slots[level] = 0;
7173
7174                 /*
7175                  * check reference count again if the block isn't locked.
7176                  * we should start walking down the tree again if reference
7177                  * count is one.
7178                  */
7179                 if (!path->locks[level]) {
7180                         BUG_ON(level == 0);
7181                         btrfs_tree_lock(eb);
7182                         btrfs_set_lock_blocking(eb);
7183                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7184
7185                         ret = btrfs_lookup_extent_info(trans, root,
7186                                                        eb->start, level, 1,
7187                                                        &wc->refs[level],
7188                                                        &wc->flags[level]);
7189                         if (ret < 0) {
7190                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7191                                 path->locks[level] = 0;
7192                                 return ret;
7193                         }
7194                         BUG_ON(wc->refs[level] == 0);
7195                         if (wc->refs[level] == 1) {
7196                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7197                                 path->locks[level] = 0;
7198                                 return 1;
7199                         }
7200                 }
7201         }
7202
7203         /* wc->stage == DROP_REFERENCE */
7204         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7205
7206         if (wc->refs[level] == 1) {
7207                 if (level == 0) {
7208                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7209                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7210                                                     wc->for_reloc);
7211                         else
7212                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7213                                                     wc->for_reloc);
7214                         BUG_ON(ret); /* -ENOMEM */
7215                 }
7216                 /* make block locked assertion in clean_tree_block happy */
7217                 if (!path->locks[level] &&
7218                     btrfs_header_generation(eb) == trans->transid) {
7219                         btrfs_tree_lock(eb);
7220                         btrfs_set_lock_blocking(eb);
7221                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7222                 }
7223                 clean_tree_block(trans, root, eb);
7224         }
7225
7226         if (eb == root->node) {
7227                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7228                         parent = eb->start;
7229                 else
7230                         BUG_ON(root->root_key.objectid !=
7231                                btrfs_header_owner(eb));
7232         } else {
7233                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7234                         parent = path->nodes[level + 1]->start;
7235                 else
7236                         BUG_ON(root->root_key.objectid !=
7237                                btrfs_header_owner(path->nodes[level + 1]));
7238         }
7239
7240         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7241 out:
7242         wc->refs[level] = 0;
7243         wc->flags[level] = 0;
7244         return 0;
7245 }
7246
7247 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7248                                    struct btrfs_root *root,
7249                                    struct btrfs_path *path,
7250                                    struct walk_control *wc)
7251 {
7252         int level = wc->level;
7253         int lookup_info = 1;
7254         int ret;
7255
7256         while (level >= 0) {
7257                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7258                 if (ret > 0)
7259                         break;
7260
7261                 if (level == 0)
7262                         break;
7263
7264                 if (path->slots[level] >=
7265                     btrfs_header_nritems(path->nodes[level]))
7266                         break;
7267
7268                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7269                 if (ret > 0) {
7270                         path->slots[level]++;
7271                         continue;
7272                 } else if (ret < 0)
7273                         return ret;
7274                 level = wc->level;
7275         }
7276         return 0;
7277 }
7278
7279 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7280                                  struct btrfs_root *root,
7281                                  struct btrfs_path *path,
7282                                  struct walk_control *wc, int max_level)
7283 {
7284         int level = wc->level;
7285         int ret;
7286
7287         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7288         while (level < max_level && path->nodes[level]) {
7289                 wc->level = level;
7290                 if (path->slots[level] + 1 <
7291                     btrfs_header_nritems(path->nodes[level])) {
7292                         path->slots[level]++;
7293                         return 0;
7294                 } else {
7295                         ret = walk_up_proc(trans, root, path, wc);
7296                         if (ret > 0)
7297                                 return 0;
7298
7299                         if (path->locks[level]) {
7300                                 btrfs_tree_unlock_rw(path->nodes[level],
7301                                                      path->locks[level]);
7302                                 path->locks[level] = 0;
7303                         }
7304                         free_extent_buffer(path->nodes[level]);
7305                         path->nodes[level] = NULL;
7306                         level++;
7307                 }
7308         }
7309         return 1;
7310 }
7311
7312 /*
7313  * drop a subvolume tree.
7314  *
7315  * this function traverses the tree freeing any blocks that are only
7316  * referenced by the tree.
7317  *
7318  * when a shared tree block is found, this function decreases its
7319  * reference count by one. if update_ref is true, this function
7320  * also makes sure backrefs for the shared block and all lower level
7321  * blocks are properly updated.
7322  *
7323  * If called with for_reloc == 0, may exit early with -EAGAIN
7324  */
7325 int btrfs_drop_snapshot(struct btrfs_root *root,
7326                          struct btrfs_block_rsv *block_rsv, int update_ref,
7327                          int for_reloc)
7328 {
7329         struct btrfs_path *path;
7330         struct btrfs_trans_handle *trans;
7331         struct btrfs_root *tree_root = root->fs_info->tree_root;
7332         struct btrfs_root_item *root_item = &root->root_item;
7333         struct walk_control *wc;
7334         struct btrfs_key key;
7335         int err = 0;
7336         int ret;
7337         int level;
7338
7339         path = btrfs_alloc_path();
7340         if (!path) {
7341                 err = -ENOMEM;
7342                 goto out;
7343         }
7344
7345         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7346         if (!wc) {
7347                 btrfs_free_path(path);
7348                 err = -ENOMEM;
7349                 goto out;
7350         }
7351
7352         trans = btrfs_start_transaction(tree_root, 0);
7353         if (IS_ERR(trans)) {
7354                 err = PTR_ERR(trans);
7355                 goto out_free;
7356         }
7357
7358         if (block_rsv)
7359                 trans->block_rsv = block_rsv;
7360
7361         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7362                 level = btrfs_header_level(root->node);
7363                 path->nodes[level] = btrfs_lock_root_node(root);
7364                 btrfs_set_lock_blocking(path->nodes[level]);
7365                 path->slots[level] = 0;
7366                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7367                 memset(&wc->update_progress, 0,
7368                        sizeof(wc->update_progress));
7369         } else {
7370                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7371                 memcpy(&wc->update_progress, &key,
7372                        sizeof(wc->update_progress));
7373
7374                 level = root_item->drop_level;
7375                 BUG_ON(level == 0);
7376                 path->lowest_level = level;
7377                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7378                 path->lowest_level = 0;
7379                 if (ret < 0) {
7380                         err = ret;
7381                         goto out_end_trans;
7382                 }
7383                 WARN_ON(ret > 0);
7384
7385                 /*
7386                  * unlock our path; this is safe because only this
7387                  * function is allowed to delete this snapshot
7388                  */
7389                 btrfs_unlock_up_safe(path, 0);
7390
7391                 level = btrfs_header_level(root->node);
7392                 while (1) {
7393                         btrfs_tree_lock(path->nodes[level]);
7394                         btrfs_set_lock_blocking(path->nodes[level]);
7395
7396                         ret = btrfs_lookup_extent_info(trans, root,
7397                                                 path->nodes[level]->start,
7398                                                 level, 1, &wc->refs[level],
7399                                                 &wc->flags[level]);
7400                         if (ret < 0) {
7401                                 err = ret;
7402                                 goto out_end_trans;
7403                         }
7404                         BUG_ON(wc->refs[level] == 0);
7405
7406                         if (level == root_item->drop_level)
7407                                 break;
7408
7409                         btrfs_tree_unlock(path->nodes[level]);
7410                         WARN_ON(wc->refs[level] != 1);
7411                         level--;
7412                 }
7413         }
7414
7415         wc->level = level;
7416         wc->shared_level = -1;
7417         wc->stage = DROP_REFERENCE;
7418         wc->update_ref = update_ref;
7419         wc->keep_locks = 0;
7420         wc->for_reloc = for_reloc;
7421         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7422
7423         while (1) {
7424                 if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
7425                         pr_debug("btrfs: drop snapshot early exit\n");
7426                         err = -EAGAIN;
7427                         goto out_end_trans;
7428                 }
7429
7430                 ret = walk_down_tree(trans, root, path, wc);
7431                 if (ret < 0) {
7432                         err = ret;
7433                         break;
7434                 }
7435
7436                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7437                 if (ret < 0) {
7438                         err = ret;
7439                         break;
7440                 }
7441
7442                 if (ret > 0) {
7443                         BUG_ON(wc->stage != DROP_REFERENCE);
7444                         break;
7445                 }
7446
7447                 if (wc->stage == DROP_REFERENCE) {
7448                         level = wc->level;
7449                         btrfs_node_key(path->nodes[level],
7450                                        &root_item->drop_progress,
7451                                        path->slots[level]);
7452                         root_item->drop_level = level;
7453                 }
7454
7455                 BUG_ON(wc->level == 0);
7456                 if (btrfs_should_end_transaction(trans, tree_root)) {
7457                         ret = btrfs_update_root(trans, tree_root,
7458                                                 &root->root_key,
7459                                                 root_item);
7460                         if (ret) {
7461                                 btrfs_abort_transaction(trans, tree_root, ret);
7462                                 err = ret;
7463                                 goto out_end_trans;
7464                         }
7465
7466                         btrfs_end_transaction_throttle(trans, tree_root);
7467                         trans = btrfs_start_transaction(tree_root, 0);
7468                         if (IS_ERR(trans)) {
7469                                 err = PTR_ERR(trans);
7470                                 goto out_free;
7471                         }
7472                         if (block_rsv)
7473                                 trans->block_rsv = block_rsv;
7474                 }
7475         }
7476         btrfs_release_path(path);
7477         if (err)
7478                 goto out_end_trans;
7479
7480         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7481         if (ret) {
7482                 btrfs_abort_transaction(trans, tree_root, ret);
7483                 goto out_end_trans;
7484         }
7485
7486         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7487                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7488                                            NULL, NULL);
7489                 if (ret < 0) {
7490                         btrfs_abort_transaction(trans, tree_root, ret);
7491                         err = ret;
7492                         goto out_end_trans;
7493                 } else if (ret > 0) {
7494                         /* if we fail to delete the orphan item this time
7495                          * around, it'll get picked up the next time.
7496                          *
7497                          * The most common failure here is just -ENOENT.
7498                          */
7499                         btrfs_del_orphan_item(trans, tree_root,
7500                                               root->root_key.objectid);
7501                 }
7502         }
7503
7504         if (root->in_radix) {
7505                 btrfs_free_fs_root(tree_root->fs_info, root);
7506         } else {
7507                 free_extent_buffer(root->node);
7508                 free_extent_buffer(root->commit_root);
7509                 kfree(root);
7510         }
7511 out_end_trans:
7512         btrfs_end_transaction_throttle(trans, tree_root);
7513 out_free:
7514         kfree(wc);
7515         btrfs_free_path(path);
7516 out:
7517         if (err)
7518                 btrfs_std_error(root->fs_info, err);
7519         return err;
7520 }
7521
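/*
 * Illustrative note (not from the original source): the drop_progress
 * bookkeeping above makes btrfs_drop_snapshot() restartable.  Before
 * each transaction ends, the current node key and level are saved in
 * root_item->drop_progress / drop_level; a later invocation (even
 * after a remount) re-searches to that key instead of starting over
 * from the root node.
 */
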
7522 /*
7523  * drop subtree rooted at tree block 'node'.
7524  *
7525  * NOTE: this function will unlock and release tree block 'node'.
7526  * it is only used by the relocation code.
7527  */
7528 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7529                         struct btrfs_root *root,
7530                         struct extent_buffer *node,
7531                         struct extent_buffer *parent)
7532 {
7533         struct btrfs_path *path;
7534         struct walk_control *wc;
7535         int level;
7536         int parent_level;
7537         int ret = 0;
7538         int wret;
7539
7540         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7541
7542         path = btrfs_alloc_path();
7543         if (!path)
7544                 return -ENOMEM;
7545
7546         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7547         if (!wc) {
7548                 btrfs_free_path(path);
7549                 return -ENOMEM;
7550         }
7551
7552         btrfs_assert_tree_locked(parent);
7553         parent_level = btrfs_header_level(parent);
7554         extent_buffer_get(parent);
7555         path->nodes[parent_level] = parent;
7556         path->slots[parent_level] = btrfs_header_nritems(parent);
7557
7558         btrfs_assert_tree_locked(node);
7559         level = btrfs_header_level(node);
7560         path->nodes[level] = node;
7561         path->slots[level] = 0;
7562         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7563
7564         wc->refs[parent_level] = 1;
7565         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7566         wc->level = level;
7567         wc->shared_level = -1;
7568         wc->stage = DROP_REFERENCE;
7569         wc->update_ref = 0;
7570         wc->keep_locks = 1;
7571         wc->for_reloc = 1;
7572         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7573
7574         while (1) {
7575                 wret = walk_down_tree(trans, root, path, wc);
7576                 if (wret < 0) {
7577                         ret = wret;
7578                         break;
7579                 }
7580
7581                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7582                 if (wret < 0)
7583                         ret = wret;
7584                 if (wret != 0)
7585                         break;
7586         }
7587
7588         kfree(wc);
7589         btrfs_free_path(path);
7590         return ret;
7591 }
7592
7593 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7594 {
7595         u64 num_devices;
7596         u64 stripped;
7597
7598         /*
7599          * if restripe for this chunk_type is on, pick the target profile
7600          * and return; otherwise do the usual balance
7601          */
7602         stripped = get_restripe_target(root->fs_info, flags);
7603         if (stripped)
7604                 return extended_to_chunk(stripped);
7605
7606         /*
7607          * we add in the count of missing devices because we want
7608          * to make sure that any RAID levels on a degraded FS
7609          * continue to be honored.
7610          */
7611         num_devices = root->fs_info->fs_devices->rw_devices +
7612                 root->fs_info->fs_devices->missing_devices;
7613
7614         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7615                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7616                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7617
7618         if (num_devices == 1) {
7619                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7620                 stripped = flags & ~stripped;
7621
7622                 /* turn raid0 into single device chunks */
7623                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7624                         return stripped;
7625
7626                 /* turn mirroring into duplication */
7627                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7628                              BTRFS_BLOCK_GROUP_RAID10))
7629                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7630         } else {
7631                 /* they already had raid on here, just return */
7632                 if (flags & stripped)
7633                         return flags;
7634
7635                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7636                 stripped = flags & ~stripped;
7637
7638                 /* switch duplicated blocks with raid1 */
7639                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7640                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7641
7642                 /* this is drive concat, leave it alone */
7643         }
7644
7645         return flags;
7646 }
7647
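/*
 * Worked examples (illustrative, not from the original source) for
 * update_block_group_flags() with no restripe target set:
 *
 *	1 rw device,  RAID0 chunk  -> single
 *	1 rw device,  RAID1 chunk  -> DUP
 *	2+ devices,   DUP chunk    -> RAID1
 *	2+ devices,   RAID* chunk  -> unchanged
 *
 * num_devices counts rw + missing devices, so a degraded RAID1 keeps
 * its profile rather than being silently downgraded.
 */
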
7648 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7649 {
7650         struct btrfs_space_info *sinfo = cache->space_info;
7651         u64 num_bytes;
7652         u64 min_allocable_bytes;
7653         int ret = -ENOSPC;
7654
7656         /*
7657          * We need some metadata space and system metadata space for
7658          * allocating chunks in some corner cases, so keep a minimum
7659          * reserve unless we are forced to set the group read-only.
7660          */
7661         if ((sinfo->flags &
7662              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7663             !force)
7664                 min_allocable_bytes = 1 * 1024 * 1024;
7665         else
7666                 min_allocable_bytes = 0;
7667
7668         spin_lock(&sinfo->lock);
7669         spin_lock(&cache->lock);
7670
7671         if (cache->ro) {
7672                 ret = 0;
7673                 goto out;
7674         }
7675
7676         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7677                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7678
7679         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7680             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7681             min_allocable_bytes <= sinfo->total_bytes) {
7682                 sinfo->bytes_readonly += num_bytes;
7683                 cache->ro = 1;
7684                 ret = 0;
7685         }
7686 out:
7687         spin_unlock(&cache->lock);
7688         spin_unlock(&sinfo->lock);
7689         return ret;
7690 }
7691
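/*
 * Worked example (illustrative numbers, not from the original source):
 * a 1024MB block group with 600MB used, 50MB reserved and nothing
 * pinned contributes num_bytes = 1024 - 50 - 600 = 374MB.  It can go
 * read-only only if
 *
 *	used + reserved + pinned + may_use + readonly + num_bytes
 *		+ min_allocable_bytes <= total_bytes
 *
 * holds for the whole space_info, i.e. the rest of the space can
 * absorb losing that free space.
 */
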
7692 int btrfs_set_block_group_ro(struct btrfs_root *root,
7693                              struct btrfs_block_group_cache *cache)
7694
7695 {
7696         struct btrfs_trans_handle *trans;
7697         u64 alloc_flags;
7698         int ret;
7699
7700         BUG_ON(cache->ro);
7701
7702         trans = btrfs_join_transaction(root);
7703         if (IS_ERR(trans))
7704                 return PTR_ERR(trans);
7705
7706         alloc_flags = update_block_group_flags(root, cache->flags);
7707         if (alloc_flags != cache->flags) {
7708                 ret = do_chunk_alloc(trans, root, alloc_flags,
7709                                      CHUNK_ALLOC_FORCE);
7710                 if (ret < 0)
7711                         goto out;
7712         }
7713
7714         ret = set_block_group_ro(cache, 0);
7715         if (!ret)
7716                 goto out;
7717         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7718         ret = do_chunk_alloc(trans, root, alloc_flags,
7719                              CHUNK_ALLOC_FORCE);
7720         if (ret < 0)
7721                 goto out;
7722         ret = set_block_group_ro(cache, 0);
7723 out:
7724         btrfs_end_transaction(trans, root);
7725         return ret;
7726 }
7727
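/*
 * Illustrative note (not from the original source): the double
 * set_block_group_ro() call above implements try / make-room / retry:
 * if marking the group read-only fails for lack of space, a new chunk
 * is force-allocated for the same profile and the read-only flip is
 * tried once more.
 */
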
7728 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7729                             struct btrfs_root *root, u64 type)
7730 {
7731         u64 alloc_flags = get_alloc_profile(root, type);
7732         return do_chunk_alloc(trans, root, alloc_flags,
7733                               CHUNK_ALLOC_FORCE);
7734 }
7735
7736 /*
7737  * helper to account the unused space of all the readonly block groups in
7738  * the list. takes mirrors into account.
7739  */
7740 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7741 {
7742         struct btrfs_block_group_cache *block_group;
7743         u64 free_bytes = 0;
7744         int factor;
7745
7746         list_for_each_entry(block_group, groups_list, list) {
7747                 spin_lock(&block_group->lock);
7748
7749                 if (!block_group->ro) {
7750                         spin_unlock(&block_group->lock);
7751                         continue;
7752                 }
7753
7754                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7755                                           BTRFS_BLOCK_GROUP_RAID10 |
7756                                           BTRFS_BLOCK_GROUP_DUP))
7757                         factor = 2;
7758                 else
7759                         factor = 1;
7760
7761                 free_bytes += (block_group->key.offset -
7762                                btrfs_block_group_used(&block_group->item)) *
7763                                factor;
7764
7765                 spin_unlock(&block_group->lock);
7766         }
7767
7768         return free_bytes;
7769 }
7770
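/*
 * Worked example (illustrative, not from the original source): a 1GB
 * read-only RAID1 block group with 300MB used reports
 * (1024 - 300) * 2 = 1448MB, since every logical byte occupies two raw
 * bytes; RAID0 and single groups use factor 1.
 */
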
7771 /*
7772  * helper to account the unused space of all the readonly block groups in
7773  * the space_info. takes mirrors into account.
7774  */
7775 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7776 {
7777         int i;
7778         u64 free_bytes = 0;
7779
7780         spin_lock(&sinfo->lock);
7781
7782         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7783                 if (!list_empty(&sinfo->block_groups[i]))
7784                         free_bytes += __btrfs_get_ro_block_group_free_space(
7785                                                 &sinfo->block_groups[i]);
7786
7787         spin_unlock(&sinfo->lock);
7788
7789         return free_bytes;
7790 }
7791
7792 void btrfs_set_block_group_rw(struct btrfs_root *root,
7793                               struct btrfs_block_group_cache *cache)
7794 {
7795         struct btrfs_space_info *sinfo = cache->space_info;
7796         u64 num_bytes;
7797
7798         BUG_ON(!cache->ro);
7799
7800         spin_lock(&sinfo->lock);
7801         spin_lock(&cache->lock);
7802         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7803                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7804         sinfo->bytes_readonly -= num_bytes;
7805         cache->ro = 0;
7806         spin_unlock(&cache->lock);
7807         spin_unlock(&sinfo->lock);
7808 }
7809
7810 /*
7811  * checks to see if it's even possible to relocate this block group.
7812  *
7813  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7814  * ok to go ahead and try.
7815  */
7816 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7817 {
7818         struct btrfs_block_group_cache *block_group;
7819         struct btrfs_space_info *space_info;
7820         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7821         struct btrfs_device *device;
7822         u64 min_free;
7823         u64 dev_min = 1;
7824         u64 dev_nr = 0;
7825         u64 target;
7826         int index;
7827         int full = 0;
7828         int ret = 0;
7829
7830         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7831
7832         /* odd, couldn't find the block group, leave it alone */
7833         if (!block_group)
7834                 return -1;
7835
7836         min_free = btrfs_block_group_used(&block_group->item);
7837
7838         /* no bytes used, we're good */
7839         if (!min_free)
7840                 goto out;
7841
7842         space_info = block_group->space_info;
7843         spin_lock(&space_info->lock);
7844
7845         full = space_info->full;
7846
7847         /*
7848          * if this is the last block group we have in this space, we can't
7849          * relocate it unless we're able to allocate a new chunk below.
7850          *
7851          * Otherwise, we need to make sure we have room in the space to handle
7852          * all of the extents from this block group.  If we can, we're good
7853          */
7854         if ((space_info->total_bytes != block_group->key.offset) &&
7855             (space_info->bytes_used + space_info->bytes_reserved +
7856              space_info->bytes_pinned + space_info->bytes_readonly +
7857              min_free < space_info->total_bytes)) {
7858                 spin_unlock(&space_info->lock);
7859                 goto out;
7860         }
7861         spin_unlock(&space_info->lock);
7862
7863         /*
7864          * ok we don't have enough space, but maybe we have free space on our
7865          * devices to allocate new chunks for relocation, so loop through our
7866          * alloc devices and guess if we have enough space.  if this block
7867          * group is going to be restriped, run checks against the target
7868          * profile instead of the current one.
7869          */
7870         ret = -1;
7871
7872         /*
7873          * index:
7874          *      0: raid10
7875          *      1: raid1
7876          *      2: dup
7877          *      3: raid0
7878          *      4: single
7879          */
7880         target = get_restripe_target(root->fs_info, block_group->flags);
7881         if (target) {
7882                 index = __get_raid_index(extended_to_chunk(target));
7883         } else {
7884                 /*
7885                  * this is just a balance, so if we were marked as full
7886                  * we know there is no space for a new chunk
7887                  */
7888                 if (full)
7889                         goto out;
7890
7891                 index = get_block_group_index(block_group);
7892         }
7893
7894         if (index == BTRFS_RAID_RAID10) {
7895                 dev_min = 4;
7896                 /* Divide by 2 */
7897                 min_free >>= 1;
7898         } else if (index == BTRFS_RAID_RAID1) {
7899                 dev_min = 2;
7900         } else if (index == BTRFS_RAID_DUP) {
7901                 /* Multiply by 2 */
7902                 min_free <<= 1;
7903         } else if (index == BTRFS_RAID_RAID0) {
7904                 dev_min = fs_devices->rw_devices;
7905                 do_div(min_free, dev_min);
7906         }
7907
7908         mutex_lock(&root->fs_info->chunk_mutex);
7909         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7910                 u64 dev_offset;
7911
7912                 /*
7913                  * check to make sure we can actually find a chunk with enough
7914                  * space to fit our block group in.
7915                  */
7916                 if (device->total_bytes > device->bytes_used + min_free &&
7917                     !device->is_tgtdev_for_dev_replace) {
7918                         ret = find_free_dev_extent(device, min_free,
7919                                                    &dev_offset, NULL);
7920                         if (!ret)
7921                                 dev_nr++;
7922
7923                         if (dev_nr >= dev_min)
7924                                 break;
7925
7926                         ret = -1;
7927                 }
7928         }
7929         mutex_unlock(&root->fs_info->chunk_mutex);
7930 out:
7931         btrfs_put_block_group(block_group);
7932         return ret;
7933 }
7934
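/*
 * Worked example (illustrative, not from the original source): to
 * relocate a RAID10 block group with 1GB used, min_free is halved to
 * 512MB but at least dev_min = 4 devices must each offer a 512MB free
 * dev extent; DUP instead doubles min_free on a single device, and
 * RAID0 divides it evenly across all rw devices.
 */
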
7935 static int find_first_block_group(struct btrfs_root *root,
7936                 struct btrfs_path *path, struct btrfs_key *key)
7937 {
7938         int ret = 0;
7939         struct btrfs_key found_key;
7940         struct extent_buffer *leaf;
7941         int slot;
7942
7943         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7944         if (ret < 0)
7945                 goto out;
7946
7947         while (1) {
7948                 slot = path->slots[0];
7949                 leaf = path->nodes[0];
7950                 if (slot >= btrfs_header_nritems(leaf)) {
7951                         ret = btrfs_next_leaf(root, path);
7952                         if (ret == 0)
7953                                 continue;
7954                         if (ret < 0)
7955                                 goto out;
7956                         break;
7957                 }
7958                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7959
7960                 if (found_key.objectid >= key->objectid &&
7961                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7962                         ret = 0;
7963                         goto out;
7964                 }
7965                 path->slots[0]++;
7966         }
7967 out:
7968         return ret;
7969 }
7970
7971 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7972 {
7973         struct btrfs_block_group_cache *block_group;
7974         u64 last = 0;
7975
7976         while (1) {
7977                 struct inode *inode;
7978
7979                 block_group = btrfs_lookup_first_block_group(info, last);
7980                 while (block_group) {
7981                         spin_lock(&block_group->lock);
7982                         if (block_group->iref)
7983                                 break;
7984                         spin_unlock(&block_group->lock);
7985                         block_group = next_block_group(info->tree_root,
7986                                                        block_group);
7987                 }
7988                 if (!block_group) {
7989                         if (last == 0)
7990                                 break;
7991                         last = 0;
7992                         continue;
7993                 }
7994
7995                 inode = block_group->inode;
7996                 block_group->iref = 0;
7997                 block_group->inode = NULL;
7998                 spin_unlock(&block_group->lock);
7999                 iput(inode);
8000                 last = block_group->key.objectid + block_group->key.offset;
8001                 btrfs_put_block_group(block_group);
8002         }
8003 }
8004
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_caching_control *caching_ctl;
        struct rb_node *n;

        down_write(&info->extent_commit_sem);
        while (!list_empty(&info->caching_block_groups)) {
                caching_ctl = list_entry(info->caching_block_groups.next,
                                         struct btrfs_caching_control, list);
                list_del(&caching_ctl->list);
                put_caching_control(caching_ctl);
        }
        up_write(&info->extent_commit_sem);

        spin_lock(&info->block_group_cache_lock);
        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
                block_group = rb_entry(n, struct btrfs_block_group_cache,
                                       cache_node);
                rb_erase(&block_group->cache_node,
                         &info->block_group_cache_tree);
                spin_unlock(&info->block_group_cache_lock);

                down_write(&block_group->space_info->groups_sem);
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);

                if (block_group->cached == BTRFS_CACHE_STARTED)
                        wait_block_group_cache_done(block_group);

                /*
                 * This block group was never cached, so we may still have
                 * excluded extents registered for it; free them now.
                 */
                if (block_group->cached == BTRFS_CACHE_NO)
                        free_excluded_extents(info->extent_root, block_group);

                btrfs_remove_free_space_cache(block_group);
                btrfs_put_block_group(block_group);

                spin_lock(&info->block_group_cache_lock);
        }
        spin_unlock(&info->block_group_cache_lock);

        /*
         * Now that all the block groups are freed, go through and free
         * all the space_info structs.  This is only called during the
         * final stages of unmount, so we know nobody is using them.  We
         * call synchronize_rcu() once before we start, just to be on
         * the safe side.
         */
        synchronize_rcu();

        release_global_block_rsv(info);

        while (!list_empty(&info->space_info)) {
                space_info = list_entry(info->space_info.next,
                                        struct btrfs_space_info,
                                        list);
                if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
                        if (space_info->bytes_pinned > 0 ||
                            space_info->bytes_reserved > 0 ||
                            space_info->bytes_may_use > 0) {
                                WARN_ON(1);
                                dump_space_info(space_info, 0, 0);
                        }
                }
                list_del(&space_info->list);
                kfree(space_info);
        }
        return 0;
}

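/*
 * Link a block group into the per-raid-profile list of its space_info,
 * so the allocator can iterate groups of one profile at a time.
 */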
static void __link_block_group(struct btrfs_space_info *space_info,
                               struct btrfs_block_group_cache *cache)
{
        int index = get_block_group_index(cache);

        down_write(&space_info->groups_sem);
        list_add_tail(&cache->list, &space_info->block_groups[index]);
        up_write(&space_info->groups_sem);
}

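/*
 * Build the in-memory block group cache at mount time by walking all
 * BTRFS_BLOCK_GROUP_ITEM_KEY items in the extent tree.
 */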
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int need_clear = 0;
        u64 cache_gen;

        root = info->extent_root;
        key.objectid = 0;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 1;

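        /*
         * The on-disk free space cache is only trusted if the super
         * block's cache generation matches its transaction generation;
         * on a mismatch (or when mounted with clear_cache) each group is
         * flagged below so its cache gets rebuilt.
         */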
        cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
        if (btrfs_test_opt(root, SPACE_CACHE) &&
            btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
                need_clear = 1;
        if (btrfs_test_opt(root, CLEAR_CACHE))
                need_clear = 1;

        while (1) {
                ret = find_first_block_group(root, path, &key);
                if (ret > 0)
                        break;
                if (ret != 0)
                        goto error;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                cache = kzalloc(sizeof(*cache), GFP_NOFS);
                if (!cache) {
                        ret = -ENOMEM;
                        goto error;
                }
                cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                                GFP_NOFS);
                if (!cache->free_space_ctl) {
                        kfree(cache);
                        ret = -ENOMEM;
                        goto error;
                }

                atomic_set(&cache->count, 1);
                spin_lock_init(&cache->lock);
                cache->fs_info = info;
                INIT_LIST_HEAD(&cache->list);
                INIT_LIST_HEAD(&cache->cluster_list);

                if (need_clear) {
                        /*
                         * When we mount with an old space cache, we need to
                         * set BTRFS_DC_CLEAR and the dirty flag.
                         *
                         * a) Setting BTRFS_DC_CLEAR makes sure that we
                         *    truncate the old free space cache inode and
                         *    set up a new one.
                         * b) Setting the dirty flag makes sure that we flush
                         *    the new space cache info onto disk.
                         */
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
                        if (btrfs_test_opt(root, SPACE_CACHE))
                                cache->dirty = 1;
                }

                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
                memcpy(&cache->key, &found_key, sizeof(found_key));

                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(path);
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;
                cache->full_stripe_len = btrfs_full_stripe_len(root,
                                               &root->fs_info->mapping_tree,
                                               found_key.objectid);
                btrfs_init_free_space_ctl(cache);

                /*
                 * We need to exclude the super stripes now so that the
                 * space info has super bytes accounted for; otherwise we'll
                 * think we have more space than we actually do.
                 */
                ret = exclude_super_stripes(root, cache);
                if (ret) {
                        /*
                         * We may have excluded something, so call this just
                         * in case.
                         */
                        free_excluded_extents(root, cache);
                        kfree(cache->free_space_ctl);
                        kfree(cache);
                        goto error;
                }

                /*
                 * Check for two cases here: either we are full, and
                 * therefore don't need to bother with the caching work
                 * since we won't find any space; or we are empty, and we
                 * can just add all the space in and be done with it.  This
                 * saves us a lot of time, particularly in the full case.
                 */
                if (found_key.offset == btrfs_block_group_used(&cache->item)) {
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        free_excluded_extents(root, cache);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        add_new_free_space(cache, root->fs_info,
                                           found_key.objectid,
                                           found_key.objectid +
                                           found_key.offset);
                        free_excluded_extents(root, cache);
                }

                ret = btrfs_add_block_group_cache(root->fs_info, cache);
                if (ret) {
                        btrfs_remove_free_space_cache(cache);
                        btrfs_put_block_group(cache);
                        goto error;
                }

                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
                                        &space_info);
                if (ret) {
                        btrfs_remove_free_space_cache(cache);
                        spin_lock(&info->block_group_cache_lock);
                        rb_erase(&cache->cache_node,
                                 &info->block_group_cache_tree);
                        spin_unlock(&info->block_group_cache_lock);
                        btrfs_put_block_group(cache);
                        goto error;
                }

                cache->space_info = space_info;
                spin_lock(&cache->space_info->lock);
                cache->space_info->bytes_readonly += cache->bytes_super;
                spin_unlock(&cache->space_info->lock);

                __link_block_group(space_info, cache);

                set_avail_alloc_bits(root->fs_info, cache->flags);
                if (btrfs_chunk_readonly(root, cache->key.objectid))
                        set_block_group_ro(cache, 1);
        }

        list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
                if (!(get_alloc_profile(root, space_info->flags) &
                      (BTRFS_BLOCK_GROUP_RAID10 |
                       BTRFS_BLOCK_GROUP_RAID1 |
                       BTRFS_BLOCK_GROUP_RAID5 |
                       BTRFS_BLOCK_GROUP_RAID6 |
                       BTRFS_BLOCK_GROUP_DUP)))
                        continue;
                /*
                 * Avoid allocating from un-mirrored block groups if there
                 * are mirrored block groups.  Indices 3 and 4 are the
                 * raid0 and single lists in this kernel's btrfs_raid_types
                 * ordering.
                 */
                list_for_each_entry(cache, &space_info->block_groups[3], list)
                        set_block_group_ro(cache, 1);
                list_for_each_entry(cache, &space_info->block_groups[4], list)
                        set_block_group_ro(cache, 1);
        }

        init_global_block_rsv(info);
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

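/*
 * Insert the block group items for all block groups created in this
 * transaction (queued on trans->new_bgs by btrfs_make_block_group())
 * into the extent tree.
 */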
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root)
{
        struct btrfs_block_group_cache *block_group, *tmp;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct btrfs_block_group_item item;
        struct btrfs_key key;
        int ret = 0;

        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
                                 new_bg_list) {
                list_del_init(&block_group->new_bg_list);

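                /* already aborted the transaction if a previous insert failed */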
                if (ret)
                        continue;

                spin_lock(&block_group->lock);
                memcpy(&item, &block_group->item, sizeof(item));
                memcpy(&key, &block_group->key, sizeof(key));
                spin_unlock(&block_group->lock);

                ret = btrfs_insert_item(trans, extent_root, &key, &item,
                                        sizeof(item));
                if (ret)
                        btrfs_abort_transaction(trans, extent_root, ret);
        }
}

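/*
 * Create the in-memory cache for a newly allocated chunk and queue it
 * on trans->new_bgs; the corresponding block group item is inserted
 * later by btrfs_create_pending_block_groups().
 */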
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,
                           u64 size)
{
        int ret;
        struct btrfs_root *extent_root;
        struct btrfs_block_group_cache *cache;

        extent_root = root->fs_info->extent_root;

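        /*
         * Tree-log replay cannot cope with a chunk allocated in the
         * transaction it is replaying, so force the next log sync to
         * fall back to a full transaction commit.
         */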
        root->fs_info->last_trans_log_full_commit = trans->transid;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return -ENOMEM;
        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                        GFP_NOFS);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return -ENOMEM;
        }

        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        cache->sectorsize = root->sectorsize;
        cache->fs_info = root->fs_info;
        cache->full_stripe_len = btrfs_full_stripe_len(root,
                                               &root->fs_info->mapping_tree,
                                               chunk_offset);

        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->new_bg_list);

        btrfs_init_free_space_ctl(cache);

        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
        btrfs_set_block_group_flags(&cache->item, type);

        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
        ret = exclude_super_stripes(root, cache);
        if (ret) {
                /*
                 * We may have excluded something, so call this just in
                 * case.
                 */
                free_excluded_extents(root, cache);
                kfree(cache->free_space_ctl);
                kfree(cache);
                return ret;
        }

        add_new_free_space(cache, root->fs_info, chunk_offset,
                           chunk_offset + size);

        free_excluded_extents(root, cache);

        ret = btrfs_add_block_group_cache(root->fs_info, cache);
        if (ret) {
                btrfs_remove_free_space_cache(cache);
                btrfs_put_block_group(cache);
                return ret;
        }

        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        if (ret) {
                btrfs_remove_free_space_cache(cache);
                spin_lock(&root->fs_info->block_group_cache_lock);
                rb_erase(&cache->cache_node,
                         &root->fs_info->block_group_cache_tree);
                spin_unlock(&root->fs_info->block_group_cache_lock);
                btrfs_put_block_group(cache);
                return ret;
        }
        update_global_block_rsv(root->fs_info);

        spin_lock(&cache->space_info->lock);
        cache->space_info->bytes_readonly += cache->bytes_super;
        spin_unlock(&cache->space_info->lock);

        __link_block_group(cache->space_info, cache);

        list_add_tail(&cache->new_bg_list, &trans->new_bgs);

        set_avail_alloc_bits(extent_root->fs_info, type);

        return 0;
}

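/*
 * Inverse of set_avail_alloc_bits(): clear the extended profile bits of
 * @flags from the per-type available-allocation masks.
 */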
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = chunk_to_extended(flags) &
                                BTRFS_EXTENDED_PROFILE_MASK;

        write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits &= ~extra_flags;
        write_sequnlock(&fs_info->profiles_lock);
}

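/*
 * Delete a block group that the caller has already emptied (via
 * relocation) and marked read-only: drop its free space cache inode,
 * remove the cache item from the tree root, unlink the in-memory
 * structures and finally delete the block group item from the extent
 * tree.
 */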
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
{
        struct btrfs_path *path;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_free_cluster *cluster;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        struct btrfs_key key;
        struct inode *inode;
        int ret;
        int index;
        int factor;

        root = root->fs_info->extent_root;

        block_group = btrfs_lookup_block_group(root->fs_info, group_start);
        BUG_ON(!block_group);
        BUG_ON(!block_group->ro);

        /*
         * Free the reserved super bytes from this block group before
         * removing it.
         */
        free_excluded_extents(root, block_group);

        memcpy(&key, &block_group->key, sizeof(key));
        index = get_block_group_index(block_group);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        /* make sure this block group isn't part of an allocation cluster */
        cluster = &root->fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * make sure this block group isn't part of a metadata
         * allocation cluster
         */
        cluster = &root->fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        inode = lookup_free_space_inode(tree_root, block_group, path);
        if (!IS_ERR(inode)) {
                ret = btrfs_orphan_add(trans, inode);
                if (ret) {
                        btrfs_add_delayed_iput(inode);
                        goto out;
                }
                clear_nlink(inode);
                /* One for the block group's ref */
                spin_lock(&block_group->lock);
                if (block_group->iref) {
                        block_group->iref = 0;
                        block_group->inode = NULL;
                        spin_unlock(&block_group->lock);
                        iput(inode);
                } else {
                        spin_unlock(&block_group->lock);
                }
                /* One for our lookup ref */
                btrfs_add_delayed_iput(inode);
        }

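        /* Delete the free space cache inode item from the tree root. */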
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = block_group->key.objectid;
        key.type = 0;

        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
                        goto out;
                btrfs_release_path(path);
        }

        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);

        if (root->fs_info->first_logical_byte == block_group->key.objectid)
                root->fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&root->fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
        if (list_empty(&block_group->space_info->block_groups[index]))
                clear_avail_alloc_bits(root->fs_info, block_group->flags);
        up_write(&block_group->space_info->groups_sem);

        if (block_group->cached == BTRFS_CACHE_STARTED)
                wait_block_group_cache_done(block_group);

        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;
        spin_unlock(&block_group->space_info->lock);

        memcpy(&key, &block_group->key, sizeof(key));

        btrfs_clear_space_info_full(root->fs_info);

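        /*
         * Drop two references: one for our lookup at the top of this
         * function, and one for the reference the block group cache
         * rbtree held until the rb_erase() above.
         */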
        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}

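/*
 * Create the base space_info structs (system plus metadata/data, or a
 * single mixed one when the MIXED_GROUPS incompat bit is set) with zero
 * sizes, so that later allocations always find a space_info to use.
 */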
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *space_info;
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return 1;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        }
out:
        return ret;
}

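/*
 * Thin wrappers exported for the transaction-abort and unmount error
 * cleanup paths, which live outside this file.
 */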
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
        return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 *actual_bytes)
{
        return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}

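/*
 * Entry point for the FITRIM ioctl: walk every block group overlapping
 * the requested range, discard its free space, and hand the number of
 * bytes actually trimmed back in range->len.
 *
 * A sketch of the userspace side (hypothetical fd, error handling
 * omitted):
 *
 *      struct fstrim_range range = {
 *              .start  = 0,
 *              .len    = ULLONG_MAX,
 *              .minlen = 0,
 *      };
 *      ioctl(fd, FITRIM, &range);
 *
 * On return, range.len holds the number of bytes trimmed.
 */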
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * Try to trim all FS space; our first block group may start at a
         * non-zero offset.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                                cache->key.objectid + cache->key.offset);

                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, 0);
                                if (!ret)
                                        wait_block_group_cache_done(cache);
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info->tree_root, cache);
        }

        range->len = trimmed;
        return ret;
}