/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
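
/*
 * Illustrative sketch (not part of the original file): a caller that only
 * wants a new chunk when space is genuinely short passes the weakest force
 * level, e.g.
 *
 *         ret = do_chunk_alloc(trans, extent_root, alloc_flags,
 *                              CHUNK_ALLOC_NO_FORCE);
 *
 * where alloc_flags stands in for the block group type being allocated
 * for; the force argument is the only thing this enum controls.
 */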

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
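
/*
 * Illustrative sketch (not part of the original file): the allocator pairs
 * these values around a reservation's lifetime, e.g.
 *
 *         btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *         ...
 *         btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 *
 * RESERVE_ALLOC additionally moves the bytes out of bytes_may_use for
 * ENOSPC accounting, while RESERVE_ALLOC_NO_ACCOUNT leaves bytes_may_use
 * untouched because the caller accounts for it elsewhere.
 */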

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
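
/*
 * Illustrative note (not part of the original file): lookups that return
 * a block group take a reference on it which the caller must drop, e.g.
 *
 *         cache = btrfs_lookup_block_group(fs_info, bytenr);
 *         if (cache) {
 *                 ... use the group ...
 *                 btrfs_put_block_group(cache);
 *         }
 *
 * block_group_cache_tree_search() below takes the reference via
 * btrfs_get_block_group(), which is what creates this obligation.
 */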

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
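
/*
 * Illustrative note (not part of the original file): btrfs_sb_offset(i)
 * yields the device offset of superblock copy i (64KiB for the primary,
 * then 64MiB and 256GiB for the mirrors), and btrfs_rmap_block() maps
 * those physical positions back to logical addresses inside this block
 * group, so e.g. a block group covering the 64MiB mark ends up with one
 * superblock stripe excluded per device that holds it.
 */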

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will only be released when the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
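
/*
 * Worked example (illustrative, not part of the original file): caching
 * the range [0, 100) while pinned_extents holds [30, 50] adds 30 bytes
 * of free space at offset 0, restarts the scan at 51, and the tail check
 * adds the remaining 49 bytes at offset 51, so total_added == 79 and the
 * 21 pinned bytes stay unavailable until the transaction commits.
 */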

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load
         * here, so we can wait for it to finish, otherwise we could end up
         * allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
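
/*
 * Illustrative summary (not part of the original file) of the cache
 * states driven above:
 *
 *         BTRFS_CACHE_NO -> BTRFS_CACHE_FAST -> BTRFS_CACHE_FINISHED
 *                                |  (free space cache miss)
 *                                v
 *         BTRFS_CACHE_STARTED -> BTRFS_CACHE_FINISHED
 *         (caching_thread scans the extent tree)
 *
 * load_cache_only callers stop after the fast attempt; on a miss the
 * state drops back to BTRFS_CACHE_NO so a later caller can kick off the
 * slow path.
 */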

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}
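
/*
 * Illustrative note (not part of the original file): div_factor(n, 9)
 * computes n * 9 / 10, so the first pass above only accepts metadata
 * block groups whose used + pinned + reserved bytes are below ~90% of
 * the group size; the factor = 10 retry accepts anything not completely
 * full.
 */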

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (metadata) {
                key.objectid = bytenr;
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = offset;
        } else {
                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = offset;
        }

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = root->leafsize;
                btrfs_release_path(path);
                goto again;
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
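
/*
 * Illustrative usage (not part of the original file, and assuming the
 * metadata convention of passing the block level as the offset): a
 * caller wanting the effective reference count of a tree block, with
 * queued delayed ref modifications folded in, would do
 *
 *         u64 refs, flags;
 *
 *         ret = btrfs_lookup_extent_info(trans, root, eb->start,
 *                                        btrfs_header_level(eb), 1,
 *                                        &refs, &flags);
 *
 * On pre-skinny-metadata filesystems the function quietly retries with
 * a regular extent item keyed on leafsize, as seen above.
 */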

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. The full back refs are actually generic, and
 * can be used in all the cases where the implicit back refs are used. The
 * major shortcoming of the full back refs is their overhead. Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increasing the lower level extents' reference counts. The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increasing the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs,
 * and the key offset has different meanings for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */

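/*
 * Worked example (illustrative, not part of the original file): a data
 * extent written by inode 257 at file offset 0 in tree 5 gets an
 * implicit back ref item keyed as
 *
 *         (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *          hash_extent_data_ref(5, 257, 0))
 *
 * hash_extent_data_ref() below derives that offset from two crc32c
 * accumulators:
 *
 *         high_crc = crc32c(~0, root_objectid);
 *         low_crc  = crc32c(crc32c(~0, owner), offset);
 *         hash     = ((u64)high_crc << 31) ^ (u64)low_crc;
 *
 * A snapshot that shares the extent adds its own ref item with a
 * different root_objectid and therefore a different hash.
 */
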
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
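
/*
 * Illustrative note (not part of the original file): because key.offset
 * is only a hash, two different (root, owner, offset) tuples can collide.
 * The -EEXIST loop above linearly probes by bumping key.offset, so a
 * collision at hash h tries h + 1, h + 2, ... until it either finds the
 * matching ref item (and bumps its count) or an empty slot for a fresh
 * insert.
 */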
1261
1262 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1263                                            struct btrfs_root *root,
1264                                            struct btrfs_path *path,
1265                                            int refs_to_drop)
1266 {
1267         struct btrfs_key key;
1268         struct btrfs_extent_data_ref *ref1 = NULL;
1269         struct btrfs_shared_data_ref *ref2 = NULL;
1270         struct extent_buffer *leaf;
1271         u32 num_refs = 0;
1272         int ret = 0;
1273
1274         leaf = path->nodes[0];
1275         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1276
1277         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1278                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1279                                       struct btrfs_extent_data_ref);
1280                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1281         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1282                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1283                                       struct btrfs_shared_data_ref);
1284                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1285 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1286         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1287                 struct btrfs_extent_ref_v0 *ref0;
1288                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1289                                       struct btrfs_extent_ref_v0);
1290                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1291 #endif
1292         } else {
1293                 BUG();
1294         }
1295
1296         BUG_ON(num_refs < refs_to_drop);
1297         num_refs -= refs_to_drop;
1298
1299         if (num_refs == 0) {
1300                 ret = btrfs_del_item(trans, root, path);
1301         } else {
1302                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1303                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1304                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1305                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1307                 else {
1308                         struct btrfs_extent_ref_v0 *ref0;
1309                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1310                                         struct btrfs_extent_ref_v0);
1311                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1312                 }
1313 #endif
1314                 btrfs_mark_buffer_dirty(leaf);
1315         }
1316         return ret;
1317 }
1318
1319 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1320                                           struct btrfs_path *path,
1321                                           struct btrfs_extent_inline_ref *iref)
1322 {
1323         struct btrfs_key key;
1324         struct extent_buffer *leaf;
1325         struct btrfs_extent_data_ref *ref1;
1326         struct btrfs_shared_data_ref *ref2;
1327         u32 num_refs = 0;
1328
1329         leaf = path->nodes[0];
1330         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1331         if (iref) {
1332                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1333                     BTRFS_EXTENT_DATA_REF_KEY) {
1334                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1335                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1336                 } else {
1337                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1338                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1339                 }
1340         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342                                       struct btrfs_extent_data_ref);
1343                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346                                       struct btrfs_shared_data_ref);
1347                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350                 struct btrfs_extent_ref_v0 *ref0;
1351                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_extent_ref_v0);
1353                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355         } else {
1356                 WARN_ON(1);
1357         }
1358         return num_refs;
1359 }
1360
1361 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1362                                           struct btrfs_root *root,
1363                                           struct btrfs_path *path,
1364                                           u64 bytenr, u64 parent,
1365                                           u64 root_objectid)
1366 {
1367         struct btrfs_key key;
1368         int ret;
1369
1370         key.objectid = bytenr;
1371         if (parent) {
1372                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1373                 key.offset = parent;
1374         } else {
1375                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1376                 key.offset = root_objectid;
1377         }
1378
1379         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1380         if (ret > 0)
1381                 ret = -ENOENT;
1382 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1383         if (ret == -ENOENT && parent) {
1384                 btrfs_release_path(path);
1385                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1386                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1387                 if (ret > 0)
1388                         ret = -ENOENT;
1389         }
1390 #endif
1391         return ret;
1392 }
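
/*
 * Example with illustrative values (the bytenrs are hypothetical, not
 * from this file): a tree block at bytenr 29360128 referenced directly
 * by the FS tree (objectid 5) is looked up with the key
 *
 *	(29360128, BTRFS_TREE_BLOCK_REF_KEY, 5)
 *
 * while the same block shared via a parent node at bytenr 29376512 uses
 *
 *	(29360128, BTRFS_SHARED_BLOCK_REF_KEY, 29376512)
 */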
1393
1394 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1395                                           struct btrfs_root *root,
1396                                           struct btrfs_path *path,
1397                                           u64 bytenr, u64 parent,
1398                                           u64 root_objectid)
1399 {
1400         struct btrfs_key key;
1401         int ret;
1402
1403         key.objectid = bytenr;
1404         if (parent) {
1405                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1406                 key.offset = parent;
1407         } else {
1408                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1409                 key.offset = root_objectid;
1410         }
1411
1412         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1413         btrfs_release_path(path);
1414         return ret;
1415 }
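
/*
 * Note: the item above is inserted with size 0 because a tree block
 * backref carries all of its information (extent bytenr, ref type, and
 * root objectid or parent bytenr) in the key itself; there is no item
 * body to fill in.
 */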
1416
1417 static inline int extent_ref_type(u64 parent, u64 owner)
1418 {
1419         int type;
1420         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1421                 if (parent > 0)
1422                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1423                 else
1424                         type = BTRFS_TREE_BLOCK_REF_KEY;
1425         } else {
1426                 if (parent > 0)
1427                         type = BTRFS_SHARED_DATA_REF_KEY;
1428                 else
1429                         type = BTRFS_EXTENT_DATA_REF_KEY;
1430         }
1431         return type;
1432 }
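
/*
 * Sketch with hypothetical arguments: extent_ref_type() maps
 * (parent, owner) to a backref key type, e.g.
 *
 *	extent_ref_type(0, 2)		returns BTRFS_TREE_BLOCK_REF_KEY
 *	extent_ref_type(parent, 2)	returns BTRFS_SHARED_BLOCK_REF_KEY
 *	extent_ref_type(0, 257)		returns BTRFS_EXTENT_DATA_REF_KEY
 *	extent_ref_type(parent, 257)	returns BTRFS_SHARED_DATA_REF_KEY
 *
 * where parent is non-zero, 2 stands in for a tree level (below
 * BTRFS_FIRST_FREE_OBJECTID) and 257 for an inode number (at or above
 * BTRFS_FIRST_FREE_OBJECTID).
 */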
1433
1434 static int find_next_key(struct btrfs_path *path, int level,
1435                          struct btrfs_key *key)
1437 {
1438         for (; level < BTRFS_MAX_LEVEL; level++) {
1439                 if (!path->nodes[level])
1440                         break;
1441                 if (path->slots[level] + 1 >=
1442                     btrfs_header_nritems(path->nodes[level]))
1443                         continue;
1444                 if (level == 0)
1445                         btrfs_item_key_to_cpu(path->nodes[level], key,
1446                                               path->slots[level] + 1);
1447                 else
1448                         btrfs_node_key_to_cpu(path->nodes[level], key,
1449                                               path->slots[level] + 1);
1450                 return 0;
1451         }
1452         return 1;
1453 }
1454
1455 /*
1456  * Look for an inline back ref.  If the back ref is found, *ref_ret is
1457  * set to the address of the inline back ref, and 0 is returned.
1458  *
1459  * If the back ref isn't found, *ref_ret is set to the address where it
1460  * should be inserted, and -ENOENT is returned.
1461  *
1462  * If insert is true and there are too many inline back refs, the path
1463  * points to the extent item, and -EAGAIN is returned.
1464  *
1465  * NOTE: inline back refs are ordered in the same way that back ref
1466  *       items in the tree are ordered.
1467  */
1468 static noinline_for_stack
1469 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1470                                  struct btrfs_root *root,
1471                                  struct btrfs_path *path,
1472                                  struct btrfs_extent_inline_ref **ref_ret,
1473                                  u64 bytenr, u64 num_bytes,
1474                                  u64 parent, u64 root_objectid,
1475                                  u64 owner, u64 offset, int insert)
1476 {
1477         struct btrfs_key key;
1478         struct extent_buffer *leaf;
1479         struct btrfs_extent_item *ei;
1480         struct btrfs_extent_inline_ref *iref;
1481         u64 flags;
1482         u64 item_size;
1483         unsigned long ptr;
1484         unsigned long end;
1485         int extra_size;
1486         int type;
1487         int want;
1488         int ret;
1489         int err = 0;
1490         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1491                                                  SKINNY_METADATA);
1492
1493         key.objectid = bytenr;
1494         key.type = BTRFS_EXTENT_ITEM_KEY;
1495         key.offset = num_bytes;
1496
1497         want = extent_ref_type(parent, owner);
1498         if (insert) {
1499                 extra_size = btrfs_extent_inline_ref_size(want);
1500                 path->keep_locks = 1;
1501         } else
1502                 extra_size = -1;
1503
1504         /*
1505          * For metadata, owner is the level of the block this ref refers
1506          * to, so it can be used directly as the METADATA_ITEM key offset.
1507          */
1508         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1509                 key.type = BTRFS_METADATA_ITEM_KEY;
1510                 key.offset = owner;
1511         }
1512
1513 again:
1514         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1515         if (ret < 0) {
1516                 err = ret;
1517                 goto out;
1518         }
1519
1520         /*
1521          * We may be a newly converted file system which still has the old fat
1522          * extent entries for metadata, so try and see if we have one of those.
1523          */
1524         if (ret > 0 && skinny_metadata) {
1525                 skinny_metadata = false;
1526                 if (path->slots[0]) {
1527                         path->slots[0]--;
1528                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1529                                               path->slots[0]);
1530                         if (key.objectid == bytenr &&
1531                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1532                             key.offset == num_bytes)
1533                                 ret = 0;
1534                 }
1535                 if (ret) {
1536                         key.type = BTRFS_EXTENT_ITEM_KEY;
1537                         key.offset = num_bytes;
1538                         btrfs_release_path(path);
1539                         goto again;
1540                 }
1541         }
1542
1543         if (ret && !insert) {
1544                 err = -ENOENT;
1545                 goto out;
1546         } else if (ret) {
1547                 err = -EIO;
1548                 WARN_ON(1);
1549                 goto out;
1550         }
1551
1552         leaf = path->nodes[0];
1553         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1554 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1555         if (item_size < sizeof(*ei)) {
1556                 if (!insert) {
1557                         err = -ENOENT;
1558                         goto out;
1559                 }
1560                 ret = convert_extent_item_v0(trans, root, path, owner,
1561                                              extra_size);
1562                 if (ret < 0) {
1563                         err = ret;
1564                         goto out;
1565                 }
1566                 leaf = path->nodes[0];
1567                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1568         }
1569 #endif
1570         BUG_ON(item_size < sizeof(*ei));
1571
1572         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1573         flags = btrfs_extent_flags(leaf, ei);
1574
1575         ptr = (unsigned long)(ei + 1);
1576         end = (unsigned long)ei + item_size;
1577
1578         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1579                 ptr += sizeof(struct btrfs_tree_block_info);
1580                 BUG_ON(ptr > end);
1581         }
1582
1583         err = -ENOENT;
1584         while (1) {
1585                 if (ptr >= end) {
1586                         WARN_ON(ptr > end);
1587                         break;
1588                 }
1589                 iref = (struct btrfs_extent_inline_ref *)ptr;
1590                 type = btrfs_extent_inline_ref_type(leaf, iref);
1591                 if (want < type)
1592                         break;
1593                 if (want > type) {
1594                         ptr += btrfs_extent_inline_ref_size(type);
1595                         continue;
1596                 }
1597
1598                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1599                         struct btrfs_extent_data_ref *dref;
1600                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1601                         if (match_extent_data_ref(leaf, dref, root_objectid,
1602                                                   owner, offset)) {
1603                                 err = 0;
1604                                 break;
1605                         }
1606                         if (hash_extent_data_ref_item(leaf, dref) <
1607                             hash_extent_data_ref(root_objectid, owner, offset))
1608                                 break;
1609                 } else {
1610                         u64 ref_offset;
1611                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1612                         if (parent > 0) {
1613                                 if (parent == ref_offset) {
1614                                         err = 0;
1615                                         break;
1616                                 }
1617                                 if (ref_offset < parent)
1618                                         break;
1619                         } else {
1620                                 if (root_objectid == ref_offset) {
1621                                         err = 0;
1622                                         break;
1623                                 }
1624                                 if (ref_offset < root_objectid)
1625                                         break;
1626                         }
1627                 }
1628                 ptr += btrfs_extent_inline_ref_size(type);
1629         }
1630         if (err == -ENOENT && insert) {
1631                 if (item_size + extra_size >=
1632                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1633                         err = -EAGAIN;
1634                         goto out;
1635                 }
1636                 /*
1637                  * To add a new inline back ref, we have to make sure
1638                  * there is no corresponding back ref item.
1639                  * For simplicity, we just do not add a new inline back
1640                  * ref if there is any kind of item for this block.
1641                  */
1642                 if (find_next_key(path, 0, &key) == 0 &&
1643                     key.objectid == bytenr &&
1644                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1645                         err = -EAGAIN;
1646                         goto out;
1647                 }
1648         }
1649         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1650 out:
1651         if (insert) {
1652                 path->keep_locks = 0;
1653                 btrfs_unlock_up_safe(path, 1);
1654         }
1655         return err;
1656 }
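
/*
 * Usage sketch (argument names are placeholders; this mirrors the
 * callers below): a caller that may need to insert typically does
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
 *					   num_bytes, parent, root_objectid,
 *					   owner, offset, 1);
 *
 * and then updates the existing ref through iref on 0, inserts a new
 * inline ref at iref on -ENOENT, or falls back to a separate keyed
 * backref item on -EAGAIN, which is exactly what
 * insert_inline_extent_backref() does below.
 */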
1657
1658 /*
1659  * helper to add a new inline back ref
1660  */
1661 static noinline_for_stack
1662 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1663                                  struct btrfs_root *root,
1664                                  struct btrfs_path *path,
1665                                  struct btrfs_extent_inline_ref *iref,
1666                                  u64 parent, u64 root_objectid,
1667                                  u64 owner, u64 offset, int refs_to_add,
1668                                  struct btrfs_delayed_extent_op *extent_op)
1669 {
1670         struct extent_buffer *leaf;
1671         struct btrfs_extent_item *ei;
1672         unsigned long ptr;
1673         unsigned long end;
1674         unsigned long item_offset;
1675         u64 refs;
1676         int size;
1677         int type;
1678
1679         leaf = path->nodes[0];
1680         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681         item_offset = (unsigned long)iref - (unsigned long)ei;
1682
1683         type = extent_ref_type(parent, owner);
1684         size = btrfs_extent_inline_ref_size(type);
1685
1686         btrfs_extend_item(trans, root, path, size);
1687
1688         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1689         refs = btrfs_extent_refs(leaf, ei);
1690         refs += refs_to_add;
1691         btrfs_set_extent_refs(leaf, ei, refs);
1692         if (extent_op)
1693                 __run_delayed_extent_op(extent_op, leaf, ei);
1694
1695         ptr = (unsigned long)ei + item_offset;
1696         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1697         if (ptr < end - size)
1698                 memmove_extent_buffer(leaf, ptr + size, ptr,
1699                                       end - size - ptr);
1700
1701         iref = (struct btrfs_extent_inline_ref *)ptr;
1702         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1703         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1704                 struct btrfs_extent_data_ref *dref;
1705                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1707                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1708                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1709                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1710         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711                 struct btrfs_shared_data_ref *sref;
1712                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1713                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1717         } else {
1718                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1719         }
1720         btrfs_mark_buffer_dirty(leaf);
1721 }
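
/*
 * Note: after the btrfs_extend_item() above, the extent item is laid
 * out roughly as
 *
 *	[btrfs_extent_item][optional btrfs_tree_block_info][inline refs...]
 *
 * where each inline ref is a one-byte type followed by type-specific
 * data.  Inline refs keep the same relative order as keyed backref
 * items, so the memmove opens a correctly sorted slot for the new ref.
 */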
1722
1723 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1724                                  struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref **ref_ret,
1727                                  u64 bytenr, u64 num_bytes, u64 parent,
1728                                  u64 root_objectid, u64 owner, u64 offset)
1729 {
1730         int ret;
1731
1732         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1733                                            bytenr, num_bytes, parent,
1734                                            root_objectid, owner, offset, 0);
1735         if (ret != -ENOENT)
1736                 return ret;
1737
1738         btrfs_release_path(path);
1739         *ref_ret = NULL;
1740
1741         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1742                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1743                                             root_objectid);
1744         } else {
1745                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1746                                              root_objectid, owner, offset);
1747         }
1748         return ret;
1749 }
1750
1751 /*
1752  * helper to update/remove an inline back ref
1753  */
1754 static noinline_for_stack
1755 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1756                                   struct btrfs_root *root,
1757                                   struct btrfs_path *path,
1758                                   struct btrfs_extent_inline_ref *iref,
1759                                   int refs_to_mod,
1760                                   struct btrfs_delayed_extent_op *extent_op)
1761 {
1762         struct extent_buffer *leaf;
1763         struct btrfs_extent_item *ei;
1764         struct btrfs_extent_data_ref *dref = NULL;
1765         struct btrfs_shared_data_ref *sref = NULL;
1766         unsigned long ptr;
1767         unsigned long end;
1768         u32 item_size;
1769         int size;
1770         int type;
1771         u64 refs;
1772
1773         leaf = path->nodes[0];
1774         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1775         refs = btrfs_extent_refs(leaf, ei);
1776         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1777         refs += refs_to_mod;
1778         btrfs_set_extent_refs(leaf, ei, refs);
1779         if (extent_op)
1780                 __run_delayed_extent_op(extent_op, leaf, ei);
1781
1782         type = btrfs_extent_inline_ref_type(leaf, iref);
1783
1784         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1785                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1786                 refs = btrfs_extent_data_ref_count(leaf, dref);
1787         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1788                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1789                 refs = btrfs_shared_data_ref_count(leaf, sref);
1790         } else {
1791                 refs = 1;
1792                 BUG_ON(refs_to_mod != -1);
1793         }
1794
1795         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1796         refs += refs_to_mod;
1797
1798         if (refs > 0) {
1799                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1800                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1801                 else
1802                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1803         } else {
1804                 size = btrfs_extent_inline_ref_size(type);
1805                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1806                 ptr = (unsigned long)iref;
1807                 end = (unsigned long)ei + item_size;
1808                 if (ptr + size < end)
1809                         memmove_extent_buffer(leaf, ptr, ptr + size,
1810                                               end - ptr - size);
1811                 item_size -= size;
1812                 btrfs_truncate_item(trans, root, path, item_size, 1);
1813         }
1814         btrfs_mark_buffer_dirty(leaf);
1815 }
1816
1817 static noinline_for_stack
1818 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1819                                  struct btrfs_root *root,
1820                                  struct btrfs_path *path,
1821                                  u64 bytenr, u64 num_bytes, u64 parent,
1822                                  u64 root_objectid, u64 owner,
1823                                  u64 offset, int refs_to_add,
1824                                  struct btrfs_delayed_extent_op *extent_op)
1825 {
1826         struct btrfs_extent_inline_ref *iref;
1827         int ret;
1828
1829         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1830                                            bytenr, num_bytes, parent,
1831                                            root_objectid, owner, offset, 1);
1832         if (ret == 0) {
1833                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1834                 update_inline_extent_backref(trans, root, path, iref,
1835                                              refs_to_add, extent_op);
1836         } else if (ret == -ENOENT) {
1837                 setup_inline_extent_backref(trans, root, path, iref, parent,
1838                                             root_objectid, owner, offset,
1839                                             refs_to_add, extent_op);
1840                 ret = 0;
1841         }
1842         return ret;
1843 }
1844
1845 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1846                                  struct btrfs_root *root,
1847                                  struct btrfs_path *path,
1848                                  u64 bytenr, u64 parent, u64 root_objectid,
1849                                  u64 owner, u64 offset, int refs_to_add)
1850 {
1851         int ret;
1852         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1853                 BUG_ON(refs_to_add != 1);
1854                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1855                                             parent, root_objectid);
1856         } else {
1857                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1858                                              parent, root_objectid,
1859                                              owner, offset, refs_to_add);
1860         }
1861         return ret;
1862 }
1863
1864 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1865                                  struct btrfs_root *root,
1866                                  struct btrfs_path *path,
1867                                  struct btrfs_extent_inline_ref *iref,
1868                                  int refs_to_drop, int is_data)
1869 {
1870         int ret = 0;
1871
1872         BUG_ON(!is_data && refs_to_drop != 1);
1873         if (iref) {
1874                 update_inline_extent_backref(trans, root, path, iref,
1875                                              -refs_to_drop, NULL);
1876         } else if (is_data) {
1877                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1878         } else {
1879                 ret = btrfs_del_item(trans, root, path);
1880         }
1881         return ret;
1882 }
1883
1884 static int btrfs_issue_discard(struct block_device *bdev,
1885                                 u64 start, u64 len)
1886 {
1887         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1888 }
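
/*
 * Note: the >> 9 shifts convert byte offsets and lengths into the
 * 512-byte sector units that blkdev_issue_discard() expects.
 */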
1889
1890 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1891                                 u64 num_bytes, u64 *actual_bytes)
1892 {
1893         int ret;
1894         u64 discarded_bytes = 0;
1895         struct btrfs_bio *bbio = NULL;
1896
1898         /* Tell the block device(s) that the sectors can be discarded */
1899         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1900                               bytenr, &num_bytes, &bbio, 0);
1901         /* Error condition is -ENOMEM */
1902         if (!ret) {
1903                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1904                 int i;
1905
1907                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1908                         if (!stripe->dev->can_discard)
1909                                 continue;
1910
1911                         ret = btrfs_issue_discard(stripe->dev->bdev,
1912                                                   stripe->physical,
1913                                                   stripe->length);
1914                         if (!ret)
1915                                 discarded_bytes += stripe->length;
1916                         else if (ret != -EOPNOTSUPP)
1917                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1918
1919                         /*
1920                          * If we get back EOPNOTSUPP for some reason, just
1921                          * ignore the return value so we don't screw up
1922                          * callers of discard_extent.
1923                          */
1924                         ret = 0;
1925                 }
1926                 kfree(bbio);
1927         }
1928
1929         if (actual_bytes)
1930                 *actual_bytes = discarded_bytes;
1931
1933         if (ret == -EOPNOTSUPP)
1934                 ret = 0;
1935         return ret;
1936 }
1937
1938 /* Can return -ENOMEM */
1939 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1940                          struct btrfs_root *root,
1941                          u64 bytenr, u64 num_bytes, u64 parent,
1942                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1943 {
1944         int ret;
1945         struct btrfs_fs_info *fs_info = root->fs_info;
1946
1947         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1948                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1949
1950         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1951                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1952                                         num_bytes,
1953                                         parent, root_objectid, (int)owner,
1954                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1955         } else {
1956                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1957                                         num_bytes,
1958                                         parent, root_objectid, owner, offset,
1959                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1960         }
1961         return ret;
1962 }
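
/*
 * Usage sketch (values are hypothetical): sharing a file extent with
 * another root, for example while snapshotting, only queues a delayed ref:
 *
 *	btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *			     new_root_objectid, inode_objectid,
 *			     file_offset, 0);
 *
 * with parent == 0 because the new ref is keyed by root, and for_cow == 0.
 * The extent tree itself is only modified later, when the delayed refs
 * are run (see btrfs_run_delayed_refs() below).
 */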
1963
1964 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1965                                   struct btrfs_root *root,
1966                                   u64 bytenr, u64 num_bytes,
1967                                   u64 parent, u64 root_objectid,
1968                                   u64 owner, u64 offset, int refs_to_add,
1969                                   struct btrfs_delayed_extent_op *extent_op)
1970 {
1971         struct btrfs_path *path;
1972         struct extent_buffer *leaf;
1973         struct btrfs_extent_item *item;
1974         u64 refs;
1975         int ret;
1976         int err = 0;
1977
1978         path = btrfs_alloc_path();
1979         if (!path)
1980                 return -ENOMEM;
1981
1982         path->reada = 1;
1983         path->leave_spinning = 1;
1984         /* this will set up the path even if it fails to insert the back ref */
1985         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1986                                            path, bytenr, num_bytes, parent,
1987                                            root_objectid, owner, offset,
1988                                            refs_to_add, extent_op);
1989         if (ret == 0)
1990                 goto out;
1991
1992         if (ret != -EAGAIN) {
1993                 err = ret;
1994                 goto out;
1995         }
1996
1997         leaf = path->nodes[0];
1998         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1999         refs = btrfs_extent_refs(leaf, item);
2000         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2001         if (extent_op)
2002                 __run_delayed_extent_op(extent_op, leaf, item);
2003
2004         btrfs_mark_buffer_dirty(leaf);
2005         btrfs_release_path(path);
2006
2007         path->reada = 1;
2008         path->leave_spinning = 1;
2009
2010         /* now insert the actual backref */
2011         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2012                                     path, bytenr, parent, root_objectid,
2013                                     owner, offset, refs_to_add);
2014         if (ret)
2015                 btrfs_abort_transaction(trans, root, ret);
2016 out:
2017         btrfs_free_path(path);
2018         return err;
2019 }
2020
2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op,
2025                                 int insert_reserved)
2026 {
2027         int ret = 0;
2028         struct btrfs_delayed_data_ref *ref;
2029         struct btrfs_key ins;
2030         u64 parent = 0;
2031         u64 ref_root = 0;
2032         u64 flags = 0;
2033
2034         ins.objectid = node->bytenr;
2035         ins.offset = node->num_bytes;
2036         ins.type = BTRFS_EXTENT_ITEM_KEY;
2037
2038         ref = btrfs_delayed_node_to_data_ref(node);
2039         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2040                 parent = ref->parent;
2041         else
2042                 ref_root = ref->root;
2043
2044         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2045                 if (extent_op)
2046                         flags |= extent_op->flags_to_set;
2047                 ret = alloc_reserved_file_extent(trans, root,
2048                                                  parent, ref_root, flags,
2049                                                  ref->objectid, ref->offset,
2050                                                  &ins, node->ref_mod);
2051         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2052                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2053                                              node->num_bytes, parent,
2054                                              ref_root, ref->objectid,
2055                                              ref->offset, node->ref_mod,
2056                                              extent_op);
2057         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2058                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2059                                           node->num_bytes, parent,
2060                                           ref_root, ref->objectid,
2061                                           ref->offset, node->ref_mod,
2062                                           extent_op);
2063         } else {
2064                 BUG();
2065         }
2066         return ret;
2067 }
2068
2069 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2070                                     struct extent_buffer *leaf,
2071                                     struct btrfs_extent_item *ei)
2072 {
2073         u64 flags = btrfs_extent_flags(leaf, ei);
2074         if (extent_op->update_flags) {
2075                 flags |= extent_op->flags_to_set;
2076                 btrfs_set_extent_flags(leaf, ei, flags);
2077         }
2078
2079         if (extent_op->update_key) {
2080                 struct btrfs_tree_block_info *bi;
2081                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2082                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2083                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2084         }
2085 }
2086
2087 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2088                                  struct btrfs_root *root,
2089                                  struct btrfs_delayed_ref_node *node,
2090                                  struct btrfs_delayed_extent_op *extent_op)
2091 {
2092         struct btrfs_key key;
2093         struct btrfs_path *path;
2094         struct btrfs_extent_item *ei;
2095         struct extent_buffer *leaf;
2096         u32 item_size;
2097         int ret;
2098         int err = 0;
2099         int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2100                         node->type == BTRFS_SHARED_BLOCK_REF_KEY);
2101
2102         if (trans->aborted)
2103                 return 0;
2104
2105         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2106                 metadata = 0;
2107
2108         path = btrfs_alloc_path();
2109         if (!path)
2110                 return -ENOMEM;
2111
2112         key.objectid = node->bytenr;
2113
2114         if (metadata) {
2115                 struct btrfs_delayed_tree_ref *tree_ref;
2116
2117                 tree_ref = btrfs_delayed_node_to_tree_ref(node);
2118                 key.type = BTRFS_METADATA_ITEM_KEY;
2119                 key.offset = tree_ref->level;
2120         } else {
2121                 key.type = BTRFS_EXTENT_ITEM_KEY;
2122                 key.offset = node->num_bytes;
2123         }
2124
2125 again:
2126         path->reada = 1;
2127         path->leave_spinning = 1;
2128         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2129                                 path, 0, 1);
2130         if (ret < 0) {
2131                 err = ret;
2132                 goto out;
2133         }
2134         if (ret > 0) {
2135                 if (metadata) {
2136                         btrfs_release_path(path);
2137                         metadata = 0;
2138
2139                         key.offset = node->num_bytes;
2140                         key.type = BTRFS_EXTENT_ITEM_KEY;
2141                         goto again;
2142                 }
2143                 err = -EIO;
2144                 goto out;
2145         }
2146
2147         leaf = path->nodes[0];
2148         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2149 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2150         if (item_size < sizeof(*ei)) {
2151                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2152                                              path, (u64)-1, 0);
2153                 if (ret < 0) {
2154                         err = ret;
2155                         goto out;
2156                 }
2157                 leaf = path->nodes[0];
2158                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2159         }
2160 #endif
2161         BUG_ON(item_size < sizeof(*ei));
2162         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2163         __run_delayed_extent_op(extent_op, leaf, ei);
2164
2165         btrfs_mark_buffer_dirty(leaf);
2166 out:
2167         btrfs_free_path(path);
2168         return err;
2169 }
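
/*
 * Note: the METADATA_ITEM to EXTENT_ITEM retry above covers extents
 * created before the skinny-metadata feature was enabled, whose extent
 * items still use (bytenr, EXTENT_ITEM, num_bytes) keys instead of
 * (bytenr, METADATA_ITEM, level).
 */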
2170
2171 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2172                                 struct btrfs_root *root,
2173                                 struct btrfs_delayed_ref_node *node,
2174                                 struct btrfs_delayed_extent_op *extent_op,
2175                                 int insert_reserved)
2176 {
2177         int ret = 0;
2178         struct btrfs_delayed_tree_ref *ref;
2179         struct btrfs_key ins;
2180         u64 parent = 0;
2181         u64 ref_root = 0;
2182         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2183                                                  SKINNY_METADATA);
2184
2185         ref = btrfs_delayed_node_to_tree_ref(node);
2186         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2187                 parent = ref->parent;
2188         else
2189                 ref_root = ref->root;
2190
2191         ins.objectid = node->bytenr;
2192         if (skinny_metadata) {
2193                 ins.offset = ref->level;
2194                 ins.type = BTRFS_METADATA_ITEM_KEY;
2195         } else {
2196                 ins.offset = node->num_bytes;
2197                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2198         }
2199
2200         BUG_ON(node->ref_mod != 1);
2201         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2202                 BUG_ON(!extent_op || !extent_op->update_flags);
2203                 ret = alloc_reserved_tree_block(trans, root,
2204                                                 parent, ref_root,
2205                                                 extent_op->flags_to_set,
2206                                                 &extent_op->key,
2207                                                 ref->level, &ins);
2208         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2209                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2210                                              node->num_bytes, parent, ref_root,
2211                                              ref->level, 0, 1, extent_op);
2212         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2213                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2214                                           node->num_bytes, parent, ref_root,
2215                                           ref->level, 0, 1, extent_op);
2216         } else {
2217                 BUG();
2218         }
2219         return ret;
2220 }
2221
2222 /* helper function to actually process a single delayed ref entry */
2223 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2224                                struct btrfs_root *root,
2225                                struct btrfs_delayed_ref_node *node,
2226                                struct btrfs_delayed_extent_op *extent_op,
2227                                int insert_reserved)
2228 {
2229         int ret = 0;
2230
2231         if (trans->aborted)
2232                 return 0;
2233
2234         if (btrfs_delayed_ref_is_head(node)) {
2235                 struct btrfs_delayed_ref_head *head;
2236                 /*
2237                  * We've hit the end of the chain and we were supposed
2238                  * to insert this extent into the tree.  But it got
2239                  * deleted before we ever needed to insert it, so all
2240                  * we have to do is clean up the accounting.
2241                  */
2242                 BUG_ON(extent_op);
2243                 head = btrfs_delayed_node_to_head(node);
2244                 if (insert_reserved) {
2245                         btrfs_pin_extent(root, node->bytenr,
2246                                          node->num_bytes, 1);
2247                         if (head->is_data) {
2248                                 ret = btrfs_del_csums(trans, root,
2249                                                       node->bytenr,
2250                                                       node->num_bytes);
2251                         }
2252                 }
2253                 return ret;
2254         }
2255
2256         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2257             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2258                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2259                                            insert_reserved);
2260         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2261                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2262                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2263                                            insert_reserved);
2264         else
2265                 BUG();
2266         return ret;
2267 }
2268
2269 static noinline struct btrfs_delayed_ref_node *
2270 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2271 {
2272         struct rb_node *node;
2273         struct btrfs_delayed_ref_node *ref;
2274         int action = BTRFS_ADD_DELAYED_REF;
2275 again:
2276         /*
2277          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2278          * This prevents the ref count from going down to zero while
2279          * there are still pending delayed refs.
2280          */
2281         node = rb_prev(&head->node.rb_node);
2282         while (1) {
2283                 if (!node)
2284                         break;
2285                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2286                                 rb_node);
2287                 if (ref->bytenr != head->node.bytenr)
2288                         break;
2289                 if (ref->action == action)
2290                         return ref;
2291                 node = rb_prev(node);
2292         }
2293         if (action == BTRFS_ADD_DELAYED_REF) {
2294                 action = BTRFS_DROP_DELAYED_REF;
2295                 goto again;
2296         }
2297         return NULL;
2298 }
2299
2300 /*
2301  * Returns the number of refs processed, or -ENOMEM or -EIO on failure,
2302  * in which case the transaction will have been aborted.
2303  */
2304 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2305                                        struct btrfs_root *root,
2306                                        struct list_head *cluster)
2307 {
2308         struct btrfs_delayed_ref_root *delayed_refs;
2309         struct btrfs_delayed_ref_node *ref;
2310         struct btrfs_delayed_ref_head *locked_ref = NULL;
2311         struct btrfs_delayed_extent_op *extent_op;
2312         struct btrfs_fs_info *fs_info = root->fs_info;
2313         int ret;
2314         int count = 0;
2315         int must_insert_reserved = 0;
2316
2317         delayed_refs = &trans->transaction->delayed_refs;
2318         while (1) {
2319                 if (!locked_ref) {
2320                         /* pick a new head ref from the cluster list */
2321                         if (list_empty(cluster))
2322                                 break;
2323
2324                         locked_ref = list_entry(cluster->next,
2325                                      struct btrfs_delayed_ref_head, cluster);
2326
2327                         /* grab the lock that says we are going to process
2328                          * all the refs for this head */
2329                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2330
2331                         /*
2332                          * we may have dropped the spin lock to get the head
2333                          * mutex lock, and that might have given someone else
2334                          * time to free the head.  If that's true, it has been
2335                          * removed from our list and we can move on.
2336                          */
2337                         if (ret == -EAGAIN) {
2338                                 locked_ref = NULL;
2339                                 count++;
2340                                 continue;
2341                         }
2342                 }
2343
2344                 /*
2345                  * We need to try and merge add/drops of the same ref since we
2346                  * can run into issues with relocate dropping the implicit ref
2347                  * and then it being added back again before the drop can
2348                  * finish.  If we merged anything we need to re-loop so we can
2349                  * get a good ref.
2350                  */
2351                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2352                                          locked_ref);
2353
2354                 /*
2355                  * locked_ref is the head node, so we have to go one
2356                  * node back for any delayed ref updates
2357                  */
2358                 ref = select_delayed_ref(locked_ref);
2359
2360                 if (ref && ref->seq &&
2361                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2362                         /*
2363                          * there are still refs with lower seq numbers in the
2364                          * process of being added. Don't run this ref yet.
2365                          */
2366                         list_del_init(&locked_ref->cluster);
2367                         btrfs_delayed_ref_unlock(locked_ref);
2368                         locked_ref = NULL;
2369                         delayed_refs->num_heads_ready++;
2370                         spin_unlock(&delayed_refs->lock);
2371                         cond_resched();
2372                         spin_lock(&delayed_refs->lock);
2373                         continue;
2374                 }
2375
2376                 /*
2377                  * record the must insert reserved flag before we
2378                  * drop the spin lock.
2379                  */
2380                 must_insert_reserved = locked_ref->must_insert_reserved;
2381                 locked_ref->must_insert_reserved = 0;
2382
2383                 extent_op = locked_ref->extent_op;
2384                 locked_ref->extent_op = NULL;
2385
2386                 if (!ref) {
2387                         /* All delayed refs have been processed.  Go ahead
2388                          * and send the head node to run_one_delayed_ref,
2389                          * so that any accounting fixes can happen.
2390                          */
2391                         ref = &locked_ref->node;
2392
2393                         if (extent_op && must_insert_reserved) {
2394                                 btrfs_free_delayed_extent_op(extent_op);
2395                                 extent_op = NULL;
2396                         }
2397
2398                         if (extent_op) {
2399                                 spin_unlock(&delayed_refs->lock);
2400
2401                                 ret = run_delayed_extent_op(trans, root,
2402                                                             ref, extent_op);
2403                                 btrfs_free_delayed_extent_op(extent_op);
2404
2405                                 if (ret) {
2406                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2407                                         spin_lock(&delayed_refs->lock);
2408                                         btrfs_delayed_ref_unlock(locked_ref);
2409                                         return ret;
2410                                 }
2411
2412                                 goto next;
2413                         }
2414                 }
2415
2416                 ref->in_tree = 0;
2417                 rb_erase(&ref->rb_node, &delayed_refs->root);
2418                 delayed_refs->num_entries--;
2419                 if (!btrfs_delayed_ref_is_head(ref)) {
2420                         /*
2421                          * When we play the delayed ref, also correct the
2422                          * ref_mod on the head node.
2423                          */
2424                         switch (ref->action) {
2425                         case BTRFS_ADD_DELAYED_REF:
2426                         case BTRFS_ADD_DELAYED_EXTENT:
2427                                 locked_ref->node.ref_mod -= ref->ref_mod;
2428                                 break;
2429                         case BTRFS_DROP_DELAYED_REF:
2430                                 locked_ref->node.ref_mod += ref->ref_mod;
2431                                 break;
2432                         default:
2433                                 WARN_ON(1);
2434                         }
2435                 }
2436                 spin_unlock(&delayed_refs->lock);
2437
2438                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2439                                           must_insert_reserved);
2440
2441                 btrfs_free_delayed_extent_op(extent_op);
2442                 if (ret) {
2443                         btrfs_delayed_ref_unlock(locked_ref);
2444                         btrfs_put_delayed_ref(ref);
2445                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2446                         spin_lock(&delayed_refs->lock);
2447                         return ret;
2448                 }
2449
2450                 /*
2451                  * If this node is a head, that means all the refs in this head
2452                  * have been dealt with, and we will pick the next head to deal
2453                  * with, so we must unlock the head and drop it from the cluster
2454                  * list before we release it.
2455                  */
2456                 if (btrfs_delayed_ref_is_head(ref)) {
2457                         list_del_init(&locked_ref->cluster);
2458                         btrfs_delayed_ref_unlock(locked_ref);
2459                         locked_ref = NULL;
2460                 }
2461                 btrfs_put_delayed_ref(ref);
2462                 count++;
2463 next:
2464                 cond_resched();
2465                 spin_lock(&delayed_refs->lock);
2466         }
2467         return count;
2468 }
2469
2470 #ifdef SCRAMBLE_DELAYED_REFS
2471 /*
2472  * Normally delayed refs get processed in ascending bytenr order. This
2473  * correlates in most cases to the order added. To expose dependencies on this
2474  * order, we start to process the tree in the middle instead of at the beginning.
2475  */
2476 static u64 find_middle(struct rb_root *root)
2477 {
2478         struct rb_node *n = root->rb_node;
2479         struct btrfs_delayed_ref_node *entry;
2480         int alt = 1;
2481         u64 middle;
2482         u64 first = 0, last = 0;
2483
2484         n = rb_first(root);
2485         if (n) {
2486                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2487                 first = entry->bytenr;
2488         }
2489         n = rb_last(root);
2490         if (n) {
2491                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2492                 last = entry->bytenr;
2493         }
2494         n = root->rb_node;
2495
2496         while (n) {
2497                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2498                 WARN_ON(!entry->in_tree);
2499
2500                 middle = entry->bytenr;
2501
2502                 if (alt)
2503                         n = n->rb_left;
2504                 else
2505                         n = n->rb_right;
2506
2507                 alt = 1 - alt;
2508         }
2509         return middle;
2510 }
2511 #endif
2512
2513 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2514                                          struct btrfs_fs_info *fs_info)
2515 {
2516         struct qgroup_update *qgroup_update;
2517         int ret = 0;
2518
2519         if (list_empty(&trans->qgroup_ref_list) !=
2520             !trans->delayed_ref_elem.seq) {
2521                 /* list without seq or seq without list */
2522                 btrfs_err(fs_info,
2523                         "qgroup accounting update error, list is%s empty, seq is %llu",
2524                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2525                         trans->delayed_ref_elem.seq);
2526                 BUG();
2527         }
2528
2529         if (!trans->delayed_ref_elem.seq)
2530                 return 0;
2531
2532         while (!list_empty(&trans->qgroup_ref_list)) {
2533                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2534                                                  struct qgroup_update, list);
2535                 list_del(&qgroup_update->list);
2536                 if (!ret)
2537                         ret = btrfs_qgroup_account_ref(
2538                                         trans, fs_info, qgroup_update->node,
2539                                         qgroup_update->extent_op);
2540                 kfree(qgroup_update);
2541         }
2542
2543         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2544
2545         return ret;
2546 }
2547
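/*
 * Note: returns 1 once delayed_refs->ref_seq has left the window
 * [seq, seq + count), i.e. at least 'count' refs have been run by other
 * threads since 'seq' was sampled.  btrfs_run_delayed_refs() uses this
 * below to let a waiting task return once enough refs have been processed.
 */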
2548 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2549                       int count)
2550 {
2551         int val = atomic_read(&delayed_refs->ref_seq);
2552
2553         if (val < seq || val >= seq + count)
2554                 return 1;
2555         return 0;
2556 }
2557
2558 /*
2559  * this starts processing the delayed reference count updates and
2560  * extent insertions we have queued up so far.  count can be
2561  * 0, which means to process everything in the tree at the start
2562  * of the run (but not newly added entries), or it can be some target
2563  * number you'd like to process.
2564  *
2565  * Returns 0 on success or if called with an aborted transaction
2566  * Returns <0 on error and aborts the transaction
2567  */
2568 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2569                            struct btrfs_root *root, unsigned long count)
2570 {
2571         struct rb_node *node;
2572         struct btrfs_delayed_ref_root *delayed_refs;
2573         struct btrfs_delayed_ref_node *ref;
2574         struct list_head cluster;
2575         int ret;
2576         u64 delayed_start;
2577         int run_all = count == (unsigned long)-1;
2578         int run_most = 0;
2579         int loops;
2580
2581         /* We'll clean this up in btrfs_cleanup_transaction */
2582         if (trans->aborted)
2583                 return 0;
2584
2585         if (root == root->fs_info->extent_root)
2586                 root = root->fs_info->tree_root;
2587
2588         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2589
2590         delayed_refs = &trans->transaction->delayed_refs;
2591         INIT_LIST_HEAD(&cluster);
2592         if (count == 0) {
2593                 count = delayed_refs->num_entries * 2;
2594                 run_most = 1;
2595         }
2596
2597         if (!run_all && !run_most) {
2598                 int old;
2599                 int seq = atomic_read(&delayed_refs->ref_seq);
2600
2601 progress:
2602                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2603                 if (old) {
2604                         DEFINE_WAIT(__wait);
2605                         if (delayed_refs->num_entries < 16384)
2606                                 return 0;
2607
2608                         prepare_to_wait(&delayed_refs->wait, &__wait,
2609                                         TASK_UNINTERRUPTIBLE);
2610
2611                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2612                         if (old) {
2613                                 schedule();
2614                                 finish_wait(&delayed_refs->wait, &__wait);
2615
2616                                 if (!refs_newer(delayed_refs, seq, 256))
2617                                         goto progress;
2618                                 else
2619                                         return 0;
2620                         } else {
2621                                 finish_wait(&delayed_refs->wait, &__wait);
2622                                 goto again;
2623                         }
2624                 }
2625
2626         } else {
2627                 atomic_inc(&delayed_refs->procs_running_refs);
2628         }
2629
2630 again:
2631         loops = 0;
2632         spin_lock(&delayed_refs->lock);
2633
2634 #ifdef SCRAMBLE_DELAYED_REFS
2635         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2636 #endif
2637
2638         while (1) {
2639                 if (!(run_all || run_most) &&
2640                     delayed_refs->num_heads_ready < 64)
2641                         break;
2642
2643                 /*
2644                  * Go find something we can process in the rbtree.  We start at
2645                  * the beginning of the tree, and then build a cluster
2646                  * of refs to process starting at the first one we are able to
2647                  * lock.
2648                  */
2649                 delayed_start = delayed_refs->run_delayed_start;
2650                 ret = btrfs_find_ref_cluster(trans, &cluster,
2651                                              delayed_refs->run_delayed_start);
2652                 if (ret)
2653                         break;
2654
2655                 ret = run_clustered_refs(trans, root, &cluster);
2656                 if (ret < 0) {
2657                         btrfs_release_ref_cluster(&cluster);
2658                         spin_unlock(&delayed_refs->lock);
2659                         btrfs_abort_transaction(trans, root, ret);
2660                         atomic_dec(&delayed_refs->procs_running_refs);
2661                         return ret;
2662                 }
2663
2664                 atomic_add(ret, &delayed_refs->ref_seq);
2665
2666                 count -= min_t(unsigned long, ret, count);
2667
2668                 if (count == 0)
2669                         break;
2670
2671                 if (delayed_start >= delayed_refs->run_delayed_start) {
2672                         if (loops == 0) {
2673                                 /*
2674                                  * btrfs_find_ref_cluster looped.  Let's do one
2675                                  * more cycle.  If we don't run any delayed refs
2676                                  * during that cycle (because all of them are
2677                                  * blocked), bail out.
2678                                  */
2679                                 loops = 1;
2680                         } else {
2681                                 /*
2682                                  * no runnable refs left, stop trying
2683                                  */
2684                                 BUG_ON(run_all);
2685                                 break;
2686                         }
2687                 }
2688                 if (ret) {
2689                         /* refs were run, let's reset staleness detection */
2690                         loops = 0;
2691                 }
2692         }
2693
2694         if (run_all) {
2695                 if (!list_empty(&trans->new_bgs)) {
2696                         spin_unlock(&delayed_refs->lock);
2697                         btrfs_create_pending_block_groups(trans, root);
2698                         spin_lock(&delayed_refs->lock);
2699                 }
2700
2701                 node = rb_first(&delayed_refs->root);
2702                 if (!node)
2703                         goto out;
2704                 count = (unsigned long)-1;
2705
2706                 while (node) {
2707                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2708                                        rb_node);
2709                         if (btrfs_delayed_ref_is_head(ref)) {
2710                                 struct btrfs_delayed_ref_head *head;
2711
2712                                 head = btrfs_delayed_node_to_head(ref);
2713                                 atomic_inc(&ref->refs);
2714
2715                                 spin_unlock(&delayed_refs->lock);
2716                                 /*
2717                                  * Mutex was contended, block until it's
2718                                  * released and try again
2719                                  */
2720                                 mutex_lock(&head->mutex);
2721                                 mutex_unlock(&head->mutex);
2722
2723                                 btrfs_put_delayed_ref(ref);
2724                                 cond_resched();
2725                                 goto again;
2726                         }
2727                         node = rb_next(node);
2728                 }
2729                 spin_unlock(&delayed_refs->lock);
2730                 schedule_timeout(1);
2731                 goto again;
2732         }
2733 out:
2734         atomic_dec(&delayed_refs->procs_running_refs);
2735         smp_mb();
2736         if (waitqueue_active(&delayed_refs->wait))
2737                 wake_up(&delayed_refs->wait);
2738
2739         spin_unlock(&delayed_refs->lock);
2740         assert_qgroups_uptodate(trans);
2741         return 0;
2742 }
2743
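/*
 * Queue a delayed extent op to set @flags on the extent item covering
 * [bytenr, bytenr + num_bytes).  The extent tree itself is only updated
 * later, when the delayed refs are run.
 */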
2744 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2745                                 struct btrfs_root *root,
2746                                 u64 bytenr, u64 num_bytes, u64 flags,
2747                                 int is_data)
2748 {
2749         struct btrfs_delayed_extent_op *extent_op;
2750         int ret;
2751
2752         extent_op = btrfs_alloc_delayed_extent_op();
2753         if (!extent_op)
2754                 return -ENOMEM;
2755
2756         extent_op->flags_to_set = flags;
2757         extent_op->update_flags = 1;
2758         extent_op->update_key = 0;
2759         extent_op->is_data = is_data ? 1 : 0;
2760
2761         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2762                                           num_bytes, extent_op);
2763         if (ret)
2764                 btrfs_free_delayed_extent_op(extent_op);
2765         return ret;
2766 }
2767
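/*
 * Look at the delayed refs pending against @bytenr and decide whether this
 * data extent could be referenced by anyone besides @root/@objectid/@offset.
 * Returns 0 if the only pending ref is our own, 1 if another (or a non-data)
 * ref exists, -ENOENT if no delayed refs are queued against this bytenr, and
 * -EAGAIN if the head's mutex was contended and the caller should retry.
 */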
2768 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2769                                       struct btrfs_root *root,
2770                                       struct btrfs_path *path,
2771                                       u64 objectid, u64 offset, u64 bytenr)
2772 {
2773         struct btrfs_delayed_ref_head *head;
2774         struct btrfs_delayed_ref_node *ref;
2775         struct btrfs_delayed_data_ref *data_ref;
2776         struct btrfs_delayed_ref_root *delayed_refs;
2777         struct rb_node *node;
2778         int ret = 0;
2779
2780         ret = -ENOENT;
2781         delayed_refs = &trans->transaction->delayed_refs;
2782         spin_lock(&delayed_refs->lock);
2783         head = btrfs_find_delayed_ref_head(trans, bytenr);
2784         if (!head)
2785                 goto out;
2786
2787         if (!mutex_trylock(&head->mutex)) {
2788                 atomic_inc(&head->node.refs);
2789                 spin_unlock(&delayed_refs->lock);
2790
2791                 btrfs_release_path(path);
2792
2793                 /*
2794                  * Mutex was contended, block until it's released and let
2795                  * caller try again
2796                  */
2797                 mutex_lock(&head->mutex);
2798                 mutex_unlock(&head->mutex);
2799                 btrfs_put_delayed_ref(&head->node);
2800                 return -EAGAIN;
2801         }
2802
2803         node = rb_prev(&head->node.rb_node);
2804         if (!node)
2805                 goto out_unlock;
2806
2807         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2808
2809         if (ref->bytenr != bytenr)
2810                 goto out_unlock;
2811
2812         ret = 1;
2813         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2814                 goto out_unlock;
2815
2816         data_ref = btrfs_delayed_node_to_data_ref(ref);
2817
2818         node = rb_prev(node);
2819         if (node) {
2820                 int seq = ref->seq;
2821
2822                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2823                 if (ref->bytenr == bytenr && ref->seq == seq)
2824                         goto out_unlock;
2825         }
2826
2827         if (data_ref->root != root->root_key.objectid ||
2828             data_ref->objectid != objectid || data_ref->offset != offset)
2829                 goto out_unlock;
2830
2831         ret = 0;
2832 out_unlock:
2833         mutex_unlock(&head->mutex);
2834 out:
2835         spin_unlock(&delayed_refs->lock);
2836         return ret;
2837 }
2838
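/*
 * Same question against the committed extent tree: returns 0 only when the
 * on-disk extent item carries a single inline data ref matching
 * @root/@objectid/@offset and the extent is newer than the last snapshot;
 * 1 (or -ENOENT) means it is, or may be, shared.
 */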
2839 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2840                                         struct btrfs_root *root,
2841                                         struct btrfs_path *path,
2842                                         u64 objectid, u64 offset, u64 bytenr)
2843 {
2844         struct btrfs_root *extent_root = root->fs_info->extent_root;
2845         struct extent_buffer *leaf;
2846         struct btrfs_extent_data_ref *ref;
2847         struct btrfs_extent_inline_ref *iref;
2848         struct btrfs_extent_item *ei;
2849         struct btrfs_key key;
2850         u32 item_size;
2851         int ret;
2852
2853         key.objectid = bytenr;
2854         key.offset = (u64)-1;
2855         key.type = BTRFS_EXTENT_ITEM_KEY;
2856
2857         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2858         if (ret < 0)
2859                 goto out;
2860         BUG_ON(ret == 0); /* Corruption */
2861
2862         ret = -ENOENT;
2863         if (path->slots[0] == 0)
2864                 goto out;
2865
2866         path->slots[0]--;
2867         leaf = path->nodes[0];
2868         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2869
2870         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2871                 goto out;
2872
2873         ret = 1;
2874         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2875 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2876         if (item_size < sizeof(*ei)) {
2877                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2878                 goto out;
2879         }
2880 #endif
2881         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2882
2883         if (item_size != sizeof(*ei) +
2884             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2885                 goto out;
2886
2887         if (btrfs_extent_generation(leaf, ei) <=
2888             btrfs_root_last_snapshot(&root->root_item))
2889                 goto out;
2890
2891         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2892         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2893             BTRFS_EXTENT_DATA_REF_KEY)
2894                 goto out;
2895
2896         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2897         if (btrfs_extent_refs(leaf, ei) !=
2898             btrfs_extent_data_ref_count(leaf, ref) ||
2899             btrfs_extent_data_ref_root(leaf, ref) !=
2900             root->root_key.objectid ||
2901             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2902             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2903                 goto out;
2904
2905         ret = 0;
2906 out:
2907         return ret;
2908 }
2909
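/*
 * Combine the two checks above.  Returns 0 only when both the committed
 * extent tree and the delayed refs show this root's ref as the sole one;
 * any other result (including errors) means the extent must be treated as
 * shared.
 */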
2910 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2911                           struct btrfs_root *root,
2912                           u64 objectid, u64 offset, u64 bytenr)
2913 {
2914         struct btrfs_path *path;
2915         int ret;
2916         int ret2;
2917
2918         path = btrfs_alloc_path();
2919         if (!path)
2920                 return -ENOMEM;
2921
2922         do {
2923                 ret = check_committed_ref(trans, root, path, objectid,
2924                                           offset, bytenr);
2925                 if (ret && ret != -ENOENT)
2926                         goto out;
2927
2928                 ret2 = check_delayed_ref(trans, root, path, objectid,
2929                                          offset, bytenr);
2930         } while (ret2 == -EAGAIN);
2931
2932         if (ret2 && ret2 != -ENOENT) {
2933                 ret = ret2;
2934                 goto out;
2935         }
2936
2937         if (ret != -ENOENT || ret2 != -ENOENT)
2938                 ret = 0;
2939 out:
2940         btrfs_free_path(path);
2941         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2942                 WARN_ON(ret > 0);
2943         return ret;
2944 }
2945
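/*
 * Walk every pointer in @buf and add (inc == 1) or drop (inc == 0) one
 * reference on each extent it points to: file extent disk bytenrs when
 * @buf is a leaf, child tree blocks otherwise.  With @full_backref the
 * refs are parented at buf->start rather than at the owning root.
 */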
2946 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2947                            struct btrfs_root *root,
2948                            struct extent_buffer *buf,
2949                            int full_backref, int inc, int for_cow)
2950 {
2951         u64 bytenr;
2952         u64 num_bytes;
2953         u64 parent;
2954         u64 ref_root;
2955         u32 nritems;
2956         struct btrfs_key key;
2957         struct btrfs_file_extent_item *fi;
2958         int i;
2959         int level;
2960         int ret = 0;
2961         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2962                             u64, u64, u64, u64, u64, u64, int);
2963
2964         ref_root = btrfs_header_owner(buf);
2965         nritems = btrfs_header_nritems(buf);
2966         level = btrfs_header_level(buf);
2967
2968         if (!root->ref_cows && level == 0)
2969                 return 0;
2970
2971         if (inc)
2972                 process_func = btrfs_inc_extent_ref;
2973         else
2974                 process_func = btrfs_free_extent;
2975
2976         if (full_backref)
2977                 parent = buf->start;
2978         else
2979                 parent = 0;
2980
2981         for (i = 0; i < nritems; i++) {
2982                 if (level == 0) {
2983                         btrfs_item_key_to_cpu(buf, &key, i);
2984                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2985                                 continue;
2986                         fi = btrfs_item_ptr(buf, i,
2987                                             struct btrfs_file_extent_item);
2988                         if (btrfs_file_extent_type(buf, fi) ==
2989                             BTRFS_FILE_EXTENT_INLINE)
2990                                 continue;
2991                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2992                         if (bytenr == 0)
2993                                 continue;
2994
2995                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2996                         key.offset -= btrfs_file_extent_offset(buf, fi);
2997                         ret = process_func(trans, root, bytenr, num_bytes,
2998                                            parent, ref_root, key.objectid,
2999                                            key.offset, for_cow);
3000                         if (ret)
3001                                 goto fail;
3002                 } else {
3003                         bytenr = btrfs_node_blockptr(buf, i);
3004                         num_bytes = btrfs_level_size(root, level - 1);
3005                         ret = process_func(trans, root, bytenr, num_bytes,
3006                                            parent, ref_root, level - 1, 0,
3007                                            for_cow);
3008                         if (ret)
3009                                 goto fail;
3010                 }
3011         }
3012         return 0;
3013 fail:
3014         return ret;
3015 }
3016
3017 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3018                   struct extent_buffer *buf, int full_backref, int for_cow)
3019 {
3020         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3021 }
3022
3023 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3024                   struct extent_buffer *buf, int full_backref, int for_cow)
3025 {
3026         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3027 }
3028
3029 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3030                                  struct btrfs_root *root,
3031                                  struct btrfs_path *path,
3032                                  struct btrfs_block_group_cache *cache)
3033 {
3034         int ret;
3035         struct btrfs_root *extent_root = root->fs_info->extent_root;
3036         unsigned long bi;
3037         struct extent_buffer *leaf;
3038
3039         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3040         if (ret < 0)
3041                 goto fail;
3042         BUG_ON(ret); /* Corruption */
3043
3044         leaf = path->nodes[0];
3045         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3046         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3047         btrfs_mark_buffer_dirty(leaf);
3048         btrfs_release_path(path);
3049 fail:
3050         if (ret) {
3051                 btrfs_abort_transaction(trans, root, ret);
3052                 return ret;
3053         }
3054         return 0;
3056 }
3057
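/*
 * Step through the block group cache rbtree: drop the caller's reference on
 * @cache and return the following block group with a reference held, or
 * NULL at the end of the tree.
 */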
3058 static struct btrfs_block_group_cache *
3059 next_block_group(struct btrfs_root *root,
3060                  struct btrfs_block_group_cache *cache)
3061 {
3062         struct rb_node *node;
3063         spin_lock(&root->fs_info->block_group_cache_lock);
3064         node = rb_next(&cache->cache_node);
3065         btrfs_put_block_group(cache);
3066         if (node) {
3067                 cache = rb_entry(node, struct btrfs_block_group_cache,
3068                                  cache_node);
3069                 btrfs_get_block_group(cache);
3070         } else
3071                 cache = NULL;
3072         spin_unlock(&root->fs_info->block_group_cache_lock);
3073         return cache;
3074 }
3075
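/*
 * Get the free space cache inode for @block_group ready for this
 * transaction: create or truncate it as needed and preallocate room for
 * the cache file (16 pages per 256MB of block group).  The resulting
 * disk_cache_state tells the writeout passes below what still needs doing.
 */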
3076 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3077                             struct btrfs_trans_handle *trans,
3078                             struct btrfs_path *path)
3079 {
3080         struct btrfs_root *root = block_group->fs_info->tree_root;
3081         struct inode *inode = NULL;
3082         u64 alloc_hint = 0;
3083         int dcs = BTRFS_DC_ERROR;
3084         int num_pages = 0;
3085         int retries = 0;
3086         int ret = 0;
3087
3088         /*
3089          * If this block group is smaller than 100 megs, don't bother
3090          * caching it.
3091          */
3092         if (block_group->key.offset < (100 * 1024 * 1024)) {
3093                 spin_lock(&block_group->lock);
3094                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3095                 spin_unlock(&block_group->lock);
3096                 return 0;
3097         }
3098
3099 again:
3100         inode = lookup_free_space_inode(root, block_group, path);
3101         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3102                 ret = PTR_ERR(inode);
3103                 btrfs_release_path(path);
3104                 goto out;
3105         }
3106
3107         if (IS_ERR(inode)) {
3108                 BUG_ON(retries);
3109                 retries++;
3110
3111                 if (block_group->ro)
3112                         goto out_free;
3113
3114                 ret = create_free_space_inode(root, trans, block_group, path);
3115                 if (ret)
3116                         goto out_free;
3117                 goto again;
3118         }
3119
3120         /* We've already set up this transaction, go ahead and exit */
3121         if (block_group->cache_generation == trans->transid &&
3122             i_size_read(inode)) {
3123                 dcs = BTRFS_DC_SETUP;
3124                 goto out_put;
3125         }
3126
3127         /*
3128          * We want to set the generation to 0, that way if anything goes wrong
3129          * from here on out we know not to trust this cache when we load up next
3130          * time.
3131          */
3132         BTRFS_I(inode)->generation = 0;
3133         ret = btrfs_update_inode(trans, root, inode);
3134         WARN_ON(ret);
3135
3136         if (i_size_read(inode) > 0) {
3137                 ret = btrfs_truncate_free_space_cache(root, trans, path,
3138                                                       inode);
3139                 if (ret)
3140                         goto out_put;
3141         }
3142
3143         spin_lock(&block_group->lock);
3144         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3145             !btrfs_test_opt(root, SPACE_CACHE)) {
3146                 /*
3147                  * don't bother trying to write stuff out _if_
3148                  * a) we're not cached, or
3149                  * b) we were mounted with the nospace_cache option.
3150                  */
3151                 dcs = BTRFS_DC_WRITTEN;
3152                 spin_unlock(&block_group->lock);
3153                 goto out_put;
3154         }
3155         spin_unlock(&block_group->lock);
3156
3157         /*
3158          * Try to preallocate enough space based on how big the block group is.
3159          * Keep in mind this has to include any pinned space which could end up
3160          * taking up quite a bit since it's not folded into the other space
3161          * cache.
3162          */
3163         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3164         if (!num_pages)
3165                 num_pages = 1;
3166
3167         num_pages *= 16;
3168         num_pages *= PAGE_CACHE_SIZE;
3169
3170         ret = btrfs_check_data_free_space(inode, num_pages);
3171         if (ret)
3172                 goto out_put;
3173
3174         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3175                                               num_pages, num_pages,
3176                                               &alloc_hint);
3177         if (!ret)
3178                 dcs = BTRFS_DC_SETUP;
3179         btrfs_free_reserved_data_space(inode, num_pages);
3180
3181 out_put:
3182         iput(inode);
3183 out_free:
3184         btrfs_release_path(path);
3185 out:
3186         spin_lock(&block_group->lock);
3187         if (!ret && dcs == BTRFS_DC_SETUP)
3188                 block_group->cache_generation = trans->transid;
3189         block_group->disk_cache_state = dcs;
3190         spin_unlock(&block_group->lock);
3191
3192         return ret;
3193 }
3194
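/*
 * Three passes over the block groups: set up free space cache inodes for
 * everything still in BTRFS_DC_CLEAR, write the dirty block group items
 * into the extent tree, then write out the cache files for groups left in
 * BTRFS_DC_NEED_WRITE.
 */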
3195 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3196                                    struct btrfs_root *root)
3197 {
3198         struct btrfs_block_group_cache *cache;
3199         int err = 0;
3200         struct btrfs_path *path;
3201         u64 last = 0;
3202
3203         path = btrfs_alloc_path();
3204         if (!path)
3205                 return -ENOMEM;
3206
3207 again:
3208         while (1) {
3209                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3210                 while (cache) {
3211                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3212                                 break;
3213                         cache = next_block_group(root, cache);
3214                 }
3215                 if (!cache) {
3216                         if (last == 0)
3217                                 break;
3218                         last = 0;
3219                         continue;
3220                 }
3221                 err = cache_save_setup(cache, trans, path);
3222                 last = cache->key.objectid + cache->key.offset;
3223                 btrfs_put_block_group(cache);
3224         }
3225
3226         while (1) {
3227                 if (last == 0) {
3228                         err = btrfs_run_delayed_refs(trans, root,
3229                                                      (unsigned long)-1);
3230                         if (err) /* File system offline */
3231                                 goto out;
3232                 }
3233
3234                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3235                 while (cache) {
3236                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3237                                 btrfs_put_block_group(cache);
3238                                 goto again;
3239                         }
3240
3241                         if (cache->dirty)
3242                                 break;
3243                         cache = next_block_group(root, cache);
3244                 }
3245                 if (!cache) {
3246                         if (last == 0)
3247                                 break;
3248                         last = 0;
3249                         continue;
3250                 }
3251
3252                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3253                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3254                 cache->dirty = 0;
3255                 last = cache->key.objectid + cache->key.offset;
3256
3257                 err = write_one_cache_group(trans, root, path, cache);
3258                 if (err) /* File system offline */
3259                         goto out;
3260
3261                 btrfs_put_block_group(cache);
3262         }
3263
3264         while (1) {
3265                 /*
3266                  * I don't think this is needed since we're just marking our
3267                  * preallocated extent as written, but it can't hurt just in
3268                  * case.
3269                  */
3270                 if (last == 0) {
3271                         err = btrfs_run_delayed_refs(trans, root,
3272                                                      (unsigned long)-1);
3273                         if (err) /* File system offline */
3274                                 goto out;
3275                 }
3276
3277                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3278                 while (cache) {
3279                         /*
3280                          * Really this shouldn't happen, but it could if we
3281                          * couldn't write the entire preallocated extent and
3282                          * splitting the extent resulted in a new block.
3283                          */
3284                         if (cache->dirty) {
3285                                 btrfs_put_block_group(cache);
3286                                 goto again;
3287                         }
3288                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3289                                 break;
3290                         cache = next_block_group(root, cache);
3291                 }
3292                 if (!cache) {
3293                         if (last == 0)
3294                                 break;
3295                         last = 0;
3296                         continue;
3297                 }
3298
3299                 err = btrfs_write_out_cache(root, trans, cache, path);
3300
3301                 /*
3302                  * If we didn't have an error then the cache state is still
3303                  * NEED_WRITE, so we can set it to WRITTEN.
3304                  */
3305                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3306                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3307                 last = cache->key.objectid + cache->key.offset;
3308                 btrfs_put_block_group(cache);
3309         }
3310 out:
3311
3312         btrfs_free_path(path);
3313         return err;
3314 }
3315
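/*
 * Report whether @bytenr sits in a read-only block group; a missing block
 * group counts as read-only as well.
 */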
3316 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3317 {
3318         struct btrfs_block_group_cache *block_group;
3319         int readonly = 0;
3320
3321         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3322         if (!block_group || block_group->ro)
3323                 readonly = 1;
3324         if (block_group)
3325                 btrfs_put_block_group(block_group);
3326         return readonly;
3327 }
3328
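/*
 * Fold @total_bytes/@bytes_used into the space_info matching @flags,
 * allocating a new space_info if this is the first block group of that
 * type.  DUP, RAID1 and RAID10 store two copies, hence the factor applied
 * to the disk_* totals.
 */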
3329 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3330                              u64 total_bytes, u64 bytes_used,
3331                              struct btrfs_space_info **space_info)
3332 {
3333         struct btrfs_space_info *found;
3334         int i;
3335         int factor;
3336
3337         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3338                      BTRFS_BLOCK_GROUP_RAID10))
3339                 factor = 2;
3340         else
3341                 factor = 1;
3342
3343         found = __find_space_info(info, flags);
3344         if (found) {
3345                 spin_lock(&found->lock);
3346                 found->total_bytes += total_bytes;
3347                 found->disk_total += total_bytes * factor;
3348                 found->bytes_used += bytes_used;
3349                 found->disk_used += bytes_used * factor;
3350                 found->full = 0;
3351                 spin_unlock(&found->lock);
3352                 *space_info = found;
3353                 return 0;
3354         }
3355         found = kzalloc(sizeof(*found), GFP_NOFS);
3356         if (!found)
3357                 return -ENOMEM;
3358
3359         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3360                 INIT_LIST_HEAD(&found->block_groups[i]);
3361         init_rwsem(&found->groups_sem);
3362         spin_lock_init(&found->lock);
3363         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3364         found->total_bytes = total_bytes;
3365         found->disk_total = total_bytes * factor;
3366         found->bytes_used = bytes_used;
3367         found->disk_used = bytes_used * factor;
3368         found->bytes_pinned = 0;
3369         found->bytes_reserved = 0;
3370         found->bytes_readonly = 0;
3371         found->bytes_may_use = 0;
3372         found->full = 0;
3373         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3374         found->chunk_alloc = 0;
3375         found->flush = 0;
3376         init_waitqueue_head(&found->wait);
3377         *space_info = found;
3378         list_add_rcu(&found->list, &info->space_info);
3379         if (flags & BTRFS_BLOCK_GROUP_DATA)
3380                 info->data_sinfo = found;
3381         return 0;
3382 }
3383
3384 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3385 {
3386         u64 extra_flags = chunk_to_extended(flags) &
3387                                 BTRFS_EXTENDED_PROFILE_MASK;
3388
3389         write_seqlock(&fs_info->profiles_lock);
3390         if (flags & BTRFS_BLOCK_GROUP_DATA)
3391                 fs_info->avail_data_alloc_bits |= extra_flags;
3392         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3393                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3394         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3395                 fs_info->avail_system_alloc_bits |= extra_flags;
3396         write_sequnlock(&fs_info->profiles_lock);
3397 }
3398
3399 /*
3400  * returns target flags in extended format or 0 if restripe for this
3401  * chunk_type is not in progress
3402  *
3403  * should be called with either volume_mutex or balance_lock held
3404  */
3405 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3406 {
3407         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3408         u64 target = 0;
3409
3410         if (!bctl)
3411                 return 0;
3412
3413         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3414             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3415                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3416         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3417                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3418                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3419         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3420                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3421                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3422         }
3423
3424         return target;
3425 }
3426
3427 /*
3428  * @flags: available profiles in extended format (see ctree.h)
3429  *
3430  * Returns reduced profile in chunk format.  If profile changing is in
3431  * progress (either running or paused) picks the target profile (if it's
3432  * already available), otherwise falls back to plain reducing.
3433  */
3434 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3435 {
3436         /*
3437          * we add in the count of missing devices because we want
3438          * to make sure that any RAID levels on a degraded FS
3439          * continue to be honored.
3440          */
3441         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3442                 root->fs_info->fs_devices->missing_devices;
3443         u64 target;
3444         u64 tmp;
3445
3446         /*
3447          * see if restripe for this chunk_type is in progress, if so
3448          * try to reduce to the target profile
3449          */
3450         spin_lock(&root->fs_info->balance_lock);
3451         target = get_restripe_target(root->fs_info, flags);
3452         if (target) {
3453                 /* pick target profile only if it's already available */
3454                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3455                         spin_unlock(&root->fs_info->balance_lock);
3456                         return extended_to_chunk(target);
3457                 }
3458         }
3459         spin_unlock(&root->fs_info->balance_lock);
3460
3461         /* First, mask out the RAID levels which aren't possible */
3462         if (num_devices == 1)
3463                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3464                            BTRFS_BLOCK_GROUP_RAID5);
3465         if (num_devices < 3)
3466                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3467         if (num_devices < 4)
3468                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3469
3470         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3471                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3472                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3473         flags &= ~tmp;
3474
3475         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3476                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3477         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3478                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3479         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3480                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3481         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3482                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3483         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3484                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3485
3486         return extended_to_chunk(flags | tmp);
3487 }
3488
3489 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3490 {
3491         unsigned seq;
3492
3493         do {
3494                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3495
3496                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3497                         flags |= root->fs_info->avail_data_alloc_bits;
3498                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3499                         flags |= root->fs_info->avail_system_alloc_bits;
3500                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3501                         flags |= root->fs_info->avail_metadata_alloc_bits;
3502         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3503
3504         return btrfs_reduce_alloc_profile(root, flags);
3505 }
3506
3507 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3508 {
3509         u64 flags;
3510         u64 ret;
3511
3512         if (data)
3513                 flags = BTRFS_BLOCK_GROUP_DATA;
3514         else if (root == root->fs_info->chunk_root)
3515                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3516         else
3517                 flags = BTRFS_BLOCK_GROUP_METADATA;
3518
3519         ret = get_alloc_profile(root, flags);
3520         return ret;
3521 }
3522
3523 /*
3524  * This will check the space that the inode allocates from to make sure we have
3525  * enough space for bytes.
3526  */
3527 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3528 {
3529         struct btrfs_space_info *data_sinfo;
3530         struct btrfs_root *root = BTRFS_I(inode)->root;
3531         struct btrfs_fs_info *fs_info = root->fs_info;
3532         u64 used;
3533         int ret = 0, committed = 0, alloc_chunk = 1;
3534
3535         /* make sure bytes are sectorsize aligned */
3536         bytes = ALIGN(bytes, root->sectorsize);
3537
3538         if (root == root->fs_info->tree_root ||
3539             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3540                 alloc_chunk = 0;
3541                 committed = 1;
3542         }
3543
3544         data_sinfo = fs_info->data_sinfo;
3545         if (!data_sinfo)
3546                 goto alloc;
3547
3548 again:
3549         /* make sure we have enough space to handle the data first */
3550         spin_lock(&data_sinfo->lock);
3551         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3552                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3553                 data_sinfo->bytes_may_use;
3554
3555         if (used + bytes > data_sinfo->total_bytes) {
3556                 struct btrfs_trans_handle *trans;
3557
3558                 /*
3559                  * if we don't have enough free bytes in this space then we need
3560                  * to alloc a new chunk.
3561                  */
3562                 if (!data_sinfo->full && alloc_chunk) {
3563                         u64 alloc_target;
3564
3565                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3566                         spin_unlock(&data_sinfo->lock);
3567 alloc:
3568                         alloc_target = btrfs_get_alloc_profile(root, 1);
3569                         trans = btrfs_join_transaction(root);
3570                         if (IS_ERR(trans))
3571                                 return PTR_ERR(trans);
3572
3573                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3574                                              alloc_target,
3575                                              CHUNK_ALLOC_NO_FORCE);
3576                         btrfs_end_transaction(trans, root);
3577                         if (ret < 0) {
3578                                 if (ret != -ENOSPC)
3579                                         return ret;
3580                                 else
3581                                         goto commit_trans;
3582                         }
3583
3584                         if (!data_sinfo)
3585                                 data_sinfo = fs_info->data_sinfo;
3586
3587                         goto again;
3588                 }
3589
3590                 /*
3591                  * If we have fewer pinned bytes than we want to allocate,
3592                  * don't bother committing the transaction; it won't help us.
3593                  */
3594                 if (data_sinfo->bytes_pinned < bytes)
3595                         committed = 1;
3596                 spin_unlock(&data_sinfo->lock);
3597
3598                 /* commit the current transaction and try again */
3599 commit_trans:
3600                 if (!committed &&
3601                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3602                         committed = 1;
3603                         trans = btrfs_join_transaction(root);
3604                         if (IS_ERR(trans))
3605                                 return PTR_ERR(trans);
3606                         ret = btrfs_commit_transaction(trans, root);
3607                         if (ret)
3608                                 return ret;
3609                         goto again;
3610                 }
3611
3612                 return -ENOSPC;
3613         }
3614         data_sinfo->bytes_may_use += bytes;
3615         trace_btrfs_space_reservation(root->fs_info, "space_info",
3616                                       data_sinfo->flags, bytes, 1);
3617         spin_unlock(&data_sinfo->lock);
3618
3619         return 0;
3620 }
3621
3622 /*
3623  * Called if we need to clear a data reservation for this inode.
3624  */
3625 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3626 {
3627         struct btrfs_root *root = BTRFS_I(inode)->root;
3628         struct btrfs_space_info *data_sinfo;
3629
3630         /* make sure bytes are sectorsize aligned */
3631         bytes = ALIGN(bytes, root->sectorsize);
3632
3633         data_sinfo = root->fs_info->data_sinfo;
3634         spin_lock(&data_sinfo->lock);
3635         data_sinfo->bytes_may_use -= bytes;
3636         trace_btrfs_space_reservation(root->fs_info, "space_info",
3637                                       data_sinfo->flags, bytes, 0);
3638         spin_unlock(&data_sinfo->lock);
3639 }
3640
3641 static void force_metadata_allocation(struct btrfs_fs_info *info)
3642 {
3643         struct list_head *head = &info->space_info;
3644         struct btrfs_space_info *found;
3645
3646         rcu_read_lock();
3647         list_for_each_entry_rcu(found, head, list) {
3648                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3649                         found->force_alloc = CHUNK_ALLOC_FORCE;
3650         }
3651         rcu_read_unlock();
3652 }
3653
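/*
 * Decide whether allocating a new chunk for @sinfo is worthwhile: always
 * for CHUNK_ALLOC_FORCE, for CHUNK_ALLOC_LIMITED while free space is below
 * roughly 1% of the FS size, and otherwise only once about 80% of the
 * existing space is allocated (the global rsv counts as allocated for
 * metadata).
 */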
3654 static int should_alloc_chunk(struct btrfs_root *root,
3655                               struct btrfs_space_info *sinfo, int force)
3656 {
3657         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3658         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3659         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3660         u64 thresh;
3661
3662         if (force == CHUNK_ALLOC_FORCE)
3663                 return 1;
3664
3665         /*
3666          * We need to take into account the global rsv because for all intents
3667          * and purposes it's used space.  Don't worry about locking the
3668          * global_rsv, it doesn't change except when the transaction commits.
3669          */
3670         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3671                 num_allocated += global_rsv->size;
3672
3673         /*
3674          * in limited mode, we want to have some free space up to
3675          * about 1% of the FS size.
3676          */
3677         if (force == CHUNK_ALLOC_LIMITED) {
3678                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3679                 thresh = max_t(u64, 64 * 1024 * 1024,
3680                                div_factor_fine(thresh, 1));
3681
3682                 if (num_bytes - num_allocated < thresh)
3683                         return 1;
3684         }
3685
3686         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3687                 return 0;
3688         return 1;
3689 }
3690
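/*
 * Worst-case metadata we must be able to write into the SYSTEM chunk when
 * allocating a chunk of @type: one device item per stripe device plus the
 * chunk tree update itself.
 */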
3691 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3692 {
3693         u64 num_dev;
3694
3695         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3696                     BTRFS_BLOCK_GROUP_RAID0 |
3697                     BTRFS_BLOCK_GROUP_RAID5 |
3698                     BTRFS_BLOCK_GROUP_RAID6))
3699                 num_dev = root->fs_info->fs_devices->rw_devices;
3700         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3701                 num_dev = 2;
3702         else
3703                 num_dev = 1;    /* DUP or single */
3704
3705         /* metadata for updating the devices and the chunk tree */
3706         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3707 }
3708
3709 static void check_system_chunk(struct btrfs_trans_handle *trans,
3710                                struct btrfs_root *root, u64 type)
3711 {
3712         struct btrfs_space_info *info;
3713         u64 left;
3714         u64 thresh;
3715
3716         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3717         spin_lock(&info->lock);
3718         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3719                 info->bytes_reserved - info->bytes_readonly;
3720         spin_unlock(&info->lock);
3721
3722         thresh = get_system_chunk_thresh(root, type);
3723         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3724                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3725                         left, thresh, type);
3726                 dump_space_info(info, 0, 0);
3727         }
3728
3729         if (left < thresh) {
3730                 u64 flags;
3731
3732                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3733                 btrfs_alloc_chunk(trans, root, flags);
3734         }
3735 }
3736
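/*
 * Allocate a new chunk for @flags if should_alloc_chunk() agrees.  Only one
 * allocation runs per space_info at a time; racing callers wait on the
 * chunk_mutex and then re-evaluate.  Returns 1 if a chunk was allocated,
 * 0 if none was needed, and a negative error (-ENOSPC once the devices are
 * full) otherwise.
 */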
3737 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3738                           struct btrfs_root *extent_root, u64 flags, int force)
3739 {
3740         struct btrfs_space_info *space_info;
3741         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3742         int wait_for_alloc = 0;
3743         int ret = 0;
3744
3745         /* Don't re-enter if we're already allocating a chunk */
3746         if (trans->allocating_chunk)
3747                 return -ENOSPC;
3748
3749         space_info = __find_space_info(extent_root->fs_info, flags);
3750         if (!space_info) {
3751                 ret = update_space_info(extent_root->fs_info, flags,
3752                                         0, 0, &space_info);
3753                 BUG_ON(ret); /* -ENOMEM */
3754         }
3755         BUG_ON(!space_info); /* Logic error */
3756
3757 again:
3758         spin_lock(&space_info->lock);
3759         if (force < space_info->force_alloc)
3760                 force = space_info->force_alloc;
3761         if (space_info->full) {
3762                 spin_unlock(&space_info->lock);
3763                 return 0;
3764         }
3765
3766         if (!should_alloc_chunk(extent_root, space_info, force)) {
3767                 spin_unlock(&space_info->lock);
3768                 return 0;
3769         } else if (space_info->chunk_alloc) {
3770                 wait_for_alloc = 1;
3771         } else {
3772                 space_info->chunk_alloc = 1;
3773         }
3774
3775         spin_unlock(&space_info->lock);
3776
3777         mutex_lock(&fs_info->chunk_mutex);
3778
3779         /*
3780          * The chunk_mutex is held throughout the entirety of a chunk
3781          * allocation, so once we've acquired the chunk_mutex we know that the
3782          * other guy is done and we need to recheck and see if we should
3783          * allocate.
3784          */
3785         if (wait_for_alloc) {
3786                 mutex_unlock(&fs_info->chunk_mutex);
3787                 wait_for_alloc = 0;
3788                 goto again;
3789         }
3790
3791         trans->allocating_chunk = true;
3792
3793         /*
3794          * If we have mixed data/metadata chunks we want to make sure we keep
3795          * allocating mixed chunks instead of individual chunks.
3796          */
3797         if (btrfs_mixed_space_info(space_info))
3798                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3799
3800         /*
3801          * if we're doing a data chunk, go ahead and make sure that
3802          * we keep a reasonable number of metadata chunks allocated in the
3803          * FS as well.
3804          */
3805         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3806                 fs_info->data_chunk_allocations++;
3807                 if (!(fs_info->data_chunk_allocations %
3808                       fs_info->metadata_ratio))
3809                         force_metadata_allocation(fs_info);
3810         }
3811
3812         /*
3813          * Check if we have enough space in SYSTEM chunk because we may need
3814          * to update devices.
3815          */
3816         check_system_chunk(trans, extent_root, flags);
3817
3818         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3819         trans->allocating_chunk = false;
3820
3821         spin_lock(&space_info->lock);
3822         if (ret < 0 && ret != -ENOSPC)
3823                 goto out;
3824         if (ret)
3825                 space_info->full = 1;
3826         else
3827                 ret = 1;
3828
3829         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3830 out:
3831         space_info->chunk_alloc = 0;
3832         spin_unlock(&space_info->lock);
3833         mutex_unlock(&fs_info->chunk_mutex);
3834         return ret;
3835 }
3836
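/*
 * Decide whether a @bytes reservation may exceed total_bytes.  The global
 * rsv is doubled and treated as used, mirrored profiles halve the usable
 * unallocated space, and the overcommit is capped at 1/2 of total_bytes
 * (1/8 when a full flush is allowed) or the unallocated device space,
 * whichever is smaller.
 */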
3837 static int can_overcommit(struct btrfs_root *root,
3838                           struct btrfs_space_info *space_info, u64 bytes,
3839                           enum btrfs_reserve_flush_enum flush)
3840 {
3841         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3842         u64 profile = btrfs_get_alloc_profile(root, 0);
3843         u64 rsv_size = 0;
3844         u64 avail;
3845         u64 used;
3846         u64 to_add;
3847
3848         used = space_info->bytes_used + space_info->bytes_reserved +
3849                 space_info->bytes_pinned + space_info->bytes_readonly;
3850
3851         spin_lock(&global_rsv->lock);
3852         rsv_size = global_rsv->size;
3853         spin_unlock(&global_rsv->lock);
3854
3855         /*
3856          * We only want to allow overcommitting if we have lots of actual space
3857          * free, but if we don't have enough space to handle the global reserve
3858          * space then we could end up having a real enospc problem when trying
3859          * to allocate a chunk or some other such important allocation.
3860          */
3861         rsv_size <<= 1;
3862         if (used + rsv_size >= space_info->total_bytes)
3863                 return 0;
3864
3865         used += space_info->bytes_may_use;
3866
3867         spin_lock(&root->fs_info->free_chunk_lock);
3868         avail = root->fs_info->free_chunk_space;
3869         spin_unlock(&root->fs_info->free_chunk_lock);
3870
3871         /*
3872          * If we have dup, raid1 or raid10 then only half of the free
3873          * space is actually usable.  For raid56, the space info used
3874          * doesn't include the parity drive, so we don't have to
3875          * change the math.
3876          */
3877         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3878                        BTRFS_BLOCK_GROUP_RAID1 |
3879                        BTRFS_BLOCK_GROUP_RAID10))
3880                 avail >>= 1;
3881
3882         to_add = space_info->total_bytes;
3883
3884         /*
3885          * If we aren't flushing all things, let us overcommit up to
3886          * 1/2 of the space.  If we can flush, don't let us overcommit
3887          * too much; let it overcommit up to 1/8 of the space.
3888          */
3889         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3890                 to_add >>= 3;
3891         else
3892                 to_add >>= 1;
3893
3894         /*
3895          * Limit the overcommit to the amount of free space we could possibly
3896          * allocate for chunks.
3897          */
3898         to_add = min(avail, to_add);
3899
3900         if (used + bytes < space_info->total_bytes + to_add)
3901                 return 1;
3902         return 0;
3903 }
3904
3905 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3906                                   unsigned long nr_pages)
3907 {
3908         struct super_block *sb = root->fs_info->sb;
3909         int started;
3910
3911         /* If we cannot start writeback, just sync all the delalloc files. */
3912         started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
3913                                                       WB_REASON_FS_FREE_SPACE);
3914         if (!started) {
3915                 /*
3916                  * We needn't worry about the filesystem going from r/w to
3917                  * r/o even though we don't acquire the ->s_umount mutex,
3918                  * because the filesystem should guarantee that the delalloc
3919                  * inode list is empty once the filesystem is read-only (all
3920                  * dirty pages have been written to disk).
3921                  */
3922                 btrfs_start_delalloc_inodes(root, 0);
3923                 if (!current->journal_info)
3924                         btrfs_wait_ordered_extents(root, 0);
3925         }
3926 }
3927
3928 /*
3929  * shrink metadata reservation for delalloc: write back dirty pages (and
3930  * optionally wait on ordered extents) until we can overcommit again, or
3931  * give up after a few loops.
3932  */
3931 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3932                             bool wait_ordered)
3933 {
3934         struct btrfs_block_rsv *block_rsv;
3935         struct btrfs_space_info *space_info;
3936         struct btrfs_trans_handle *trans;
3937         u64 delalloc_bytes;
3938         u64 max_reclaim;
3939         long time_left;
3940         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3941         int loops = 0;
3942         enum btrfs_reserve_flush_enum flush;
3943
3944         trans = (struct btrfs_trans_handle *)current->journal_info;
3945         block_rsv = &root->fs_info->delalloc_block_rsv;
3946         space_info = block_rsv->space_info;
3947
3948         smp_mb();
3949         delalloc_bytes = percpu_counter_sum_positive(
3950                                                 &root->fs_info->delalloc_bytes);
3951         if (delalloc_bytes == 0) {
3952                 if (trans)
3953                         return;
3954                 btrfs_wait_ordered_extents(root, 0);
3955                 return;
3956         }
3957
3958         while (delalloc_bytes && loops < 3) {
3959                 max_reclaim = min(delalloc_bytes, to_reclaim);
3960                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3961                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3962                 /*
3963                  * We need to wait for the async pages to actually start before
3964                  * we do anything.
3965                  */
3966                 wait_event(root->fs_info->async_submit_wait,
3967                            !atomic_read(&root->fs_info->async_delalloc_pages));
3968
3969                 if (!trans)
3970                         flush = BTRFS_RESERVE_FLUSH_ALL;
3971                 else
3972                         flush = BTRFS_RESERVE_NO_FLUSH;
3973                 spin_lock(&space_info->lock);
3974                 if (can_overcommit(root, space_info, orig, flush)) {
3975                         spin_unlock(&space_info->lock);
3976                         break;
3977                 }
3978                 spin_unlock(&space_info->lock);
3979
3980                 loops++;
3981                 if (wait_ordered && !trans) {
3982                         btrfs_wait_ordered_extents(root, 0);
3983                 } else {
3984                         time_left = schedule_timeout_killable(1);
3985                         if (time_left)
3986                                 break;
3987                 }
3988                 smp_mb();
3989                 delalloc_bytes = percpu_counter_sum_positive(
3990                                                 &root->fs_info->delalloc_bytes);
3991         }
3992 }
3993
3994 /**
3995  * may_commit_transaction - possibly commit the transaction if it's ok to
3996  * @root - the root we're allocating for
3997  * @bytes - the number of bytes we want to reserve
3998  * @force - force the commit
3999  *
4000  * This will check to make sure that committing the transaction will actually
4001  * get us somewhere and then commit the transaction if it does.  Otherwise it
4002  * will return -ENOSPC.
4003  */
4004 static int may_commit_transaction(struct btrfs_root *root,
4005                                   struct btrfs_space_info *space_info,
4006                                   u64 bytes, int force)
4007 {
4008         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4009         struct btrfs_trans_handle *trans;
4010
4011         trans = (struct btrfs_trans_handle *)current->journal_info;
4012         if (trans)
4013                 return -EAGAIN;
4014
4015         if (force)
4016                 goto commit;
4017
4018         /* See if there is enough pinned space to make this reservation */
4019         spin_lock(&space_info->lock);
4020         if (space_info->bytes_pinned >= bytes) {
4021                 spin_unlock(&space_info->lock);
4022                 goto commit;
4023         }
4024         spin_unlock(&space_info->lock);
4025
4026         /*
4027          * See if there is some space in the delayed insertion reservation for
4028          * this reservation.
4029          */
4030         if (space_info != delayed_rsv->space_info)
4031                 return -ENOSPC;
4032
4033         spin_lock(&space_info->lock);
4034         spin_lock(&delayed_rsv->lock);
4035         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
4036                 spin_unlock(&delayed_rsv->lock);
4037                 spin_unlock(&space_info->lock);
4038                 return -ENOSPC;
4039         }
4040         spin_unlock(&delayed_rsv->lock);
4041         spin_unlock(&space_info->lock);
4042
4043 commit:
4044         trans = btrfs_join_transaction(root);
4045         if (IS_ERR(trans))
4046                 return -ENOSPC;
4047
4048         return btrfs_commit_transaction(trans, root);
4049 }
4050
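/*
 * The flush states below escalate in cost: run a bounded number of delayed
 * items, then all of them, flush delalloc (then flush and wait), allocate
 * a new chunk, and finally commit the transaction.
 */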
4051 enum flush_state {
4052         FLUSH_DELAYED_ITEMS_NR  =       1,
4053         FLUSH_DELAYED_ITEMS     =       2,
4054         FLUSH_DELALLOC          =       3,
4055         FLUSH_DELALLOC_WAIT     =       4,
4056         ALLOC_CHUNK             =       5,
4057         COMMIT_TRANS            =       6,
4058 };
4059
4060 static int flush_space(struct btrfs_root *root,
4061                        struct btrfs_space_info *space_info, u64 num_bytes,
4062                        u64 orig_bytes, int state)
4063 {
4064         struct btrfs_trans_handle *trans;
4065         int nr;
4066         int ret = 0;
4067
4068         switch (state) {
4069         case FLUSH_DELAYED_ITEMS_NR:
4070         case FLUSH_DELAYED_ITEMS:
4071                 if (state == FLUSH_DELAYED_ITEMS_NR) {
4072                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4073
4074                         nr = (int)div64_u64(num_bytes, bytes);
4075                         if (!nr)
4076                                 nr = 1;
4077                         nr *= 2;
4078                 } else {
4079                         nr = -1;
4080                 }
4081                 trans = btrfs_join_transaction(root);
4082                 if (IS_ERR(trans)) {
4083                         ret = PTR_ERR(trans);
4084                         break;
4085                 }
4086                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4087                 btrfs_end_transaction(trans, root);
4088                 break;
4089         case FLUSH_DELALLOC:
4090         case FLUSH_DELALLOC_WAIT:
4091                 shrink_delalloc(root, num_bytes, orig_bytes,
4092                                 state == FLUSH_DELALLOC_WAIT);
4093                 break;
4094         case ALLOC_CHUNK:
4095                 trans = btrfs_join_transaction(root);
4096                 if (IS_ERR(trans)) {
4097                         ret = PTR_ERR(trans);
4098                         break;
4099                 }
4100                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4101                                      btrfs_get_alloc_profile(root, 0),
4102                                      CHUNK_ALLOC_NO_FORCE);
4103                 btrfs_end_transaction(trans, root);
4104                 if (ret == -ENOSPC)
4105                         ret = 0;
4106                 break;
4107         case COMMIT_TRANS:
4108                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4109                 break;
4110         default:
4111                 ret = -ENOSPC;
4112                 break;
4113         }
4114
4115         return ret;
4116 }
4117 /**
4118  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4119  * @root: the root we're allocating for
4120  * @block_rsv: the block_rsv we're allocating for
4121  * @orig_bytes: the number of bytes we want
4122  * @flush: whether or not we can flush to make our reservation
4123  *
4124  * This will reserve orig_bytes number of bytes from the space info associated
4125  * with the block_rsv.  If there is not enough space it will make an attempt to
4126  * flush out space to make room.  It will do this by flushing delalloc if
4127  * possible or committing the transaction.  If flush is 0 then no attempts to
4128  * regain reservations will be made and this will fail if there is not enough
4129  * space already.
4130  */
4131 static int reserve_metadata_bytes(struct btrfs_root *root,
4132                                   struct btrfs_block_rsv *block_rsv,
4133                                   u64 orig_bytes,
4134                                   enum btrfs_reserve_flush_enum flush)
4135 {
4136         struct btrfs_space_info *space_info = block_rsv->space_info;
4137         u64 used;
4138         u64 num_bytes = orig_bytes;
4139         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4140         int ret = 0;
4141         bool flushing = false;
4142
4143 again:
4144         ret = 0;
4145         spin_lock(&space_info->lock);
4146         /*
4147          * We only want to wait if somebody other than us is flushing and we
4148          * are actually allowed to flush all things.
4149          */
4150         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4151                space_info->flush) {
4152                 spin_unlock(&space_info->lock);
4153                 /*
4154                  * If we have a trans handle we can't wait because the flusher
4155                  * may have to commit the transaction, which would mean we would
4156                  * deadlock since we are waiting for the flusher to finish, but
4157                  * hold the current transaction open.
4158                  */
4159                 if (current->journal_info)
4160                         return -EAGAIN;
4161                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4162                 /* Must have been killed, return */
4163                 if (ret)
4164                         return -EINTR;
4165
4166                 spin_lock(&space_info->lock);
4167         }
4168
4169         ret = -ENOSPC;
4170         used = space_info->bytes_used + space_info->bytes_reserved +
4171                 space_info->bytes_pinned + space_info->bytes_readonly +
4172                 space_info->bytes_may_use;
4173
4174         /*
4175          * The idea here is that if we've not already over-reserved the block
4176          * group then we can go ahead and save our reservation first and then
4177          * start flushing if we need to.  Otherwise, if we've already
4178          * overcommitted, let's start flushing stuff first and then come back
4179          * and try to make our reservation.
4180          */
4181         if (used <= space_info->total_bytes) {
4182                 if (used + orig_bytes <= space_info->total_bytes) {
4183                         space_info->bytes_may_use += orig_bytes;
4184                         trace_btrfs_space_reservation(root->fs_info,
4185                                 "space_info", space_info->flags, orig_bytes, 1);
4186                         ret = 0;
4187                 } else {
4188                         /*
4189                          * Ok, set num_bytes to orig_bytes since we aren't
4190                          * overcommitted; this way we only try to reclaim what
4191                          * we need.
4192                          */
4193                         num_bytes = orig_bytes;
4194                 }
4195         } else {
4196                 /*
4197                  * Ok, we're overcommitted: set num_bytes to the overcommitted
4198                  * amount plus the amount of bytes that we need for this
4199                  * reservation.
4200                  */
4201                 num_bytes = used - space_info->total_bytes +
4202                         (orig_bytes * 2);
4203         }
4204
4205         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4206                 space_info->bytes_may_use += orig_bytes;
4207                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4208                                               space_info->flags, orig_bytes,
4209                                               1);
4210                 ret = 0;
4211         }
4212
4213         /*
4214          * We couldn't make our reservation, so save our place so that while
4215          * we're trying to reclaim space we can actually use it instead of
4216          * somebody else stealing it from us.
4217          *
4218          * We make the other tasks wait for the flush only when we can flush
4219          * all things.
4220          */
4221         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4222                 flushing = true;
4223                 space_info->flush = 1;
4224         }
4225
4226         spin_unlock(&space_info->lock);
4227
4228         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4229                 goto out;
4230
4231         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4232                           flush_state);
4233         flush_state++;
4234
4235         /*
4236          * If we are FLUSH_LIMIT, we can not flush delalloc, or we would
4237          * deadlock.  So skip the delalloc flush states.
4238          */
4239         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4240             (flush_state == FLUSH_DELALLOC ||
4241              flush_state == FLUSH_DELALLOC_WAIT))
4242                 flush_state = ALLOC_CHUNK;
4243
4244         if (!ret)
4245                 goto again;
4246         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4247                  flush_state < COMMIT_TRANS)
4248                 goto again;
4249         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4250                  flush_state <= COMMIT_TRANS)
4251                 goto again;
4252
4253 out:
4254         if (ret == -ENOSPC &&
4255             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4256                 struct btrfs_block_rsv *global_rsv =
4257                         &root->fs_info->global_block_rsv;
4258
4259                 if (block_rsv != global_rsv &&
4260                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4261                         ret = 0;
4262         }
4263         if (flushing) {
4264                 spin_lock(&space_info->lock);
4265                 space_info->flush = 0;
4266                 wake_up_all(&space_info->wait);
4267                 spin_unlock(&space_info->lock);
4268         }
4269         return ret;
4270 }
4271
4272 static struct btrfs_block_rsv *get_block_rsv(
4273                                         const struct btrfs_trans_handle *trans,
4274                                         const struct btrfs_root *root)
4275 {
4276         struct btrfs_block_rsv *block_rsv = NULL;
4277
4278         if (root->ref_cows)
4279                 block_rsv = trans->block_rsv;
4280
4281         if (root == root->fs_info->csum_root && trans->adding_csums)
4282                 block_rsv = trans->block_rsv;
4283
4284         if (!block_rsv)
4285                 block_rsv = root->block_rsv;
4286
4287         if (!block_rsv)
4288                 block_rsv = &root->fs_info->empty_block_rsv;
4289
4290         return block_rsv;
4291 }
4292
4293 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4294                                u64 num_bytes)
4295 {
4296         int ret = -ENOSPC;
4297         spin_lock(&block_rsv->lock);
4298         if (block_rsv->reserved >= num_bytes) {
4299                 block_rsv->reserved -= num_bytes;
4300                 if (block_rsv->reserved < block_rsv->size)
4301                         block_rsv->full = 0;
4302                 ret = 0;
4303         }
4304         spin_unlock(&block_rsv->lock);
4305         return ret;
4306 }
4307
4308 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4309                                 u64 num_bytes, int update_size)
4310 {
4311         spin_lock(&block_rsv->lock);
4312         block_rsv->reserved += num_bytes;
4313         if (update_size)
4314                 block_rsv->size += num_bytes;
4315         else if (block_rsv->reserved >= block_rsv->size)
4316                 block_rsv->full = 1;
4317         spin_unlock(&block_rsv->lock);
4318 }
4319
4320 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4321                                     struct btrfs_block_rsv *block_rsv,
4322                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4323 {
4324         struct btrfs_space_info *space_info = block_rsv->space_info;
4325
4326         spin_lock(&block_rsv->lock);
4327         if (num_bytes == (u64)-1)
4328                 num_bytes = block_rsv->size;
4329         block_rsv->size -= num_bytes;
4330         if (block_rsv->reserved >= block_rsv->size) {
4331                 num_bytes = block_rsv->reserved - block_rsv->size;
4332                 block_rsv->reserved = block_rsv->size;
4333                 block_rsv->full = 1;
4334         } else {
4335                 num_bytes = 0;
4336         }
4337         spin_unlock(&block_rsv->lock);
4338
4339         if (num_bytes > 0) {
4340                 if (dest) {
4341                         spin_lock(&dest->lock);
4342                         if (!dest->full) {
4343                                 u64 bytes_to_add;
4344
4345                                 bytes_to_add = dest->size - dest->reserved;
4346                                 bytes_to_add = min(num_bytes, bytes_to_add);
4347                                 dest->reserved += bytes_to_add;
4348                                 if (dest->reserved >= dest->size)
4349                                         dest->full = 1;
4350                                 num_bytes -= bytes_to_add;
4351                         }
4352                         spin_unlock(&dest->lock);
4353                 }
4354                 if (num_bytes) {
4355                         spin_lock(&space_info->lock);
4356                         space_info->bytes_may_use -= num_bytes;
4357                         trace_btrfs_space_reservation(fs_info, "space_info",
4358                                         space_info->flags, num_bytes, 0);
4359                         space_info->reservation_progress++;
4360                         spin_unlock(&space_info->lock);
4361                 }
4362         }
4363 }
4364
4365 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4366                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4367 {
4368         int ret;
4369
4370         ret = block_rsv_use_bytes(src, num_bytes);
4371         if (ret)
4372                 return ret;
4373
4374         block_rsv_add_bytes(dst, num_bytes, 1);
4375         return 0;
4376 }
4377
4378 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4379 {
4380         memset(rsv, 0, sizeof(*rsv));
4381         spin_lock_init(&rsv->lock);
4382         rsv->type = type;
4383 }
4384
4385 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4386                                               unsigned short type)
4387 {
4388         struct btrfs_block_rsv *block_rsv;
4389         struct btrfs_fs_info *fs_info = root->fs_info;
4390
4391         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4392         if (!block_rsv)
4393                 return NULL;
4394
4395         btrfs_init_block_rsv(block_rsv, type);
4396         block_rsv->space_info = __find_space_info(fs_info,
4397                                                   BTRFS_BLOCK_GROUP_METADATA);
4398         return block_rsv;
4399 }
4400
4401 void btrfs_free_block_rsv(struct btrfs_root *root,
4402                           struct btrfs_block_rsv *rsv)
4403 {
4404         if (!rsv)
4405                 return;
4406         btrfs_block_rsv_release(root, rsv, (u64)-1);
4407         kfree(rsv);
4408 }
4409
4410 int btrfs_block_rsv_add(struct btrfs_root *root,
4411                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4412                         enum btrfs_reserve_flush_enum flush)
4413 {
4414         int ret;
4415
4416         if (num_bytes == 0)
4417                 return 0;
4418
4419         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4420         if (!ret) {
4421                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4422                 return 0;
4423         }
4424
4425         return ret;
4426 }
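
/*
 * Editorial example (not part of the original file): the typical lifecycle
 * of a temporary reservation built from the helpers above.  The rsv type
 * and error handling are illustrative only.
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		goto out;			(nothing was reserved)
 *	... consume bytes, e.g. via block_rsv_migrate_bytes() ...
 * out:
 *	btrfs_free_block_rsv(root, rsv);	(returns leftover space)
 */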
4427
4428 int btrfs_block_rsv_check(struct btrfs_root *root,
4429                           struct btrfs_block_rsv *block_rsv, int min_factor)
4430 {
4431         u64 num_bytes = 0;
4432         int ret = -ENOSPC;
4433
4434         if (!block_rsv)
4435                 return 0;
4436
4437         spin_lock(&block_rsv->lock);
4438         num_bytes = div_factor(block_rsv->size, min_factor);
4439         if (block_rsv->reserved >= num_bytes)
4440                 ret = 0;
4441         spin_unlock(&block_rsv->lock);
4442
4443         return ret;
4444 }
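
/*
 * Editorial note (not part of the original file): div_factor() scales
 * block_rsv->size by min_factor tenths, so for example with
 * rsv->size == 8MiB and rsv->reserved == 5MiB:
 *
 *	btrfs_block_rsv_check(root, rsv, 5) == 0	(5MiB >= 4MiB)
 *	btrfs_block_rsv_check(root, rsv, 8) == -ENOSPC	(5MiB < 6.4MiB)
 */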
4445
4446 int btrfs_block_rsv_refill(struct btrfs_root *root,
4447                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4448                            enum btrfs_reserve_flush_enum flush)
4449 {
4450         u64 num_bytes = 0;
4451         int ret = -ENOSPC;
4452
4453         if (!block_rsv)
4454                 return 0;
4455
4456         spin_lock(&block_rsv->lock);
4457         num_bytes = min_reserved;
4458         if (block_rsv->reserved >= num_bytes)
4459                 ret = 0;
4460         else
4461                 num_bytes -= block_rsv->reserved;
4462         spin_unlock(&block_rsv->lock);
4463
4464         if (!ret)
4465                 return 0;
4466
4467         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4468         if (!ret) {
4469                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4470                 return 0;
4471         }
4472
4473         return ret;
4474 }
4475
4476 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4477                             struct btrfs_block_rsv *dst_rsv,
4478                             u64 num_bytes)
4479 {
4480         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4481 }
4482
4483 void btrfs_block_rsv_release(struct btrfs_root *root,
4484                              struct btrfs_block_rsv *block_rsv,
4485                              u64 num_bytes)
4486 {
4487         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4488         if (global_rsv->full || global_rsv == block_rsv ||
4489             block_rsv->space_info != global_rsv->space_info)
4490                 global_rsv = NULL;
4491         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4492                                 num_bytes);
4493 }
4494
4495 /*
4496  * Helper to calculate the size of the global block reservation.
4497  * The desired value is the sum of the space used by the extent tree,
4498  * the checksum tree and the root tree.
4499  */
4500 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4501 {
4502         struct btrfs_space_info *sinfo;
4503         u64 num_bytes;
4504         u64 meta_used;
4505         u64 data_used;
4506         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4507
4508         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4509         spin_lock(&sinfo->lock);
4510         data_used = sinfo->bytes_used;
4511         spin_unlock(&sinfo->lock);
4512
4513         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4514         spin_lock(&sinfo->lock);
4515         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4516                 data_used = 0;
4517         meta_used = sinfo->bytes_used;
4518         spin_unlock(&sinfo->lock);
4519
4520         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4521                     csum_size * 2;
4522         num_bytes += div64_u64(data_used + meta_used, 50);
4523
4524         if (num_bytes * 3 > meta_used)
4525                 num_bytes = div64_u64(meta_used, 3);
4526
4527         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4528 }
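
/*
 * Editorial worked example (illustrative numbers, not part of the original
 * file): with crc32c checksums (csum_size == 4), 4KiB blocks,
 * data_used == 100GiB and meta_used == 2GiB:
 *
 *	csum space:	(100GiB >> 12) * 4 * 2		~= 200MiB
 *	1/50 slack:	(100GiB + 2GiB) / 50		~= 2.04GiB
 *	subtotal:					~= 2.24GiB
 *
 * Since 2.24GiB * 3 > meta_used, the result is clamped to
 * meta_used / 3 ~= 682MiB and then rounded up to a multiple of
 * leafsize << 10.
 */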
4529
4530 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4531 {
4532         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4533         struct btrfs_space_info *sinfo = block_rsv->space_info;
4534         u64 num_bytes;
4535
4536         num_bytes = calc_global_metadata_size(fs_info);
4537
4538         spin_lock(&sinfo->lock);
4539         spin_lock(&block_rsv->lock);
4540
4541         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4542
4543         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4544                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4545                     sinfo->bytes_may_use;
4546
4547         if (sinfo->total_bytes > num_bytes) {
4548                 num_bytes = sinfo->total_bytes - num_bytes;
4549                 block_rsv->reserved += num_bytes;
4550                 sinfo->bytes_may_use += num_bytes;
4551                 trace_btrfs_space_reservation(fs_info, "space_info",
4552                                       sinfo->flags, num_bytes, 1);
4553         }
4554
4555         if (block_rsv->reserved >= block_rsv->size) {
4556                 num_bytes = block_rsv->reserved - block_rsv->size;
4557                 sinfo->bytes_may_use -= num_bytes;
4558                 trace_btrfs_space_reservation(fs_info, "space_info",
4559                                       sinfo->flags, num_bytes, 0);
4560                 sinfo->reservation_progress++;
4561                 block_rsv->reserved = block_rsv->size;
4562                 block_rsv->full = 1;
4563         }
4564
4565         spin_unlock(&block_rsv->lock);
4566         spin_unlock(&sinfo->lock);
4567 }
4568
4569 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4570 {
4571         struct btrfs_space_info *space_info;
4572
4573         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4574         fs_info->chunk_block_rsv.space_info = space_info;
4575
4576         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4577         fs_info->global_block_rsv.space_info = space_info;
4578         fs_info->delalloc_block_rsv.space_info = space_info;
4579         fs_info->trans_block_rsv.space_info = space_info;
4580         fs_info->empty_block_rsv.space_info = space_info;
4581         fs_info->delayed_block_rsv.space_info = space_info;
4582
4583         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4584         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4585         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4586         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4587         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4588
4589         update_global_block_rsv(fs_info);
4590 }
4591
4592 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4593 {
4594         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4595                                 (u64)-1);
4596         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4597         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4598         WARN_ON(fs_info->trans_block_rsv.size > 0);
4599         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4600         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4601         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4602         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4603         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4604 }
4605
4606 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4607                                   struct btrfs_root *root)
4608 {
4609         if (!trans->block_rsv)
4610                 return;
4611
4612         if (!trans->bytes_reserved)
4613                 return;
4614
4615         trace_btrfs_space_reservation(root->fs_info, "transaction",
4616                                       trans->transid, trans->bytes_reserved, 0);
4617         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4618         trans->bytes_reserved = 0;
4619 }
4620
4621 /* Can only return 0 or -ENOSPC */
4622 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4623                                   struct inode *inode)
4624 {
4625         struct btrfs_root *root = BTRFS_I(inode)->root;
4626         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4627         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4628
4629         /*
4630          * We need to hold space in order to delete our orphan item once we've
4631          * added it, so this takes the reservation so we can release it later
4632          * when we are truly done with the orphan item.
4633          */
4634         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4635         trace_btrfs_space_reservation(root->fs_info, "orphan",
4636                                       btrfs_ino(inode), num_bytes, 1);
4637         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4638 }
4639
4640 void btrfs_orphan_release_metadata(struct inode *inode)
4641 {
4642         struct btrfs_root *root = BTRFS_I(inode)->root;
4643         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4644         trace_btrfs_space_reservation(root->fs_info, "orphan",
4645                                       btrfs_ino(inode), num_bytes, 0);
4646         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4647 }
4648
4649 /*
4650  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4651  * root: the root of the parent directory
4652  * rsv: block reservation
4653  * items: the number of items that we need to reserve space for
4654  * qgroup_reserved: used to return the reserved size in qgroup
4655  *
4656  * This function is used to reserve the space for snapshot/subvolume
4657  * creation and deletion. Those operations are different from the
4658  * common file/directory operations: they change two fs/file trees
4659  * and the root tree, and the number of items that the qgroup reserves
4660  * differs from the free space reservation. So we can not use
4661  * the space reservation mechanism in start_transaction().
4662  */
4663 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4664                                      struct btrfs_block_rsv *rsv,
4665                                      int items,
4666                                      u64 *qgroup_reserved)
4667 {
4668         u64 num_bytes;
4669         int ret;
4670
4671         if (root->fs_info->quota_enabled) {
4672                 /* One for parent inode, two for dir entries */
4673                 num_bytes = 3 * root->leafsize;
4674                 ret = btrfs_qgroup_reserve(root, num_bytes);
4675                 if (ret)
4676                         return ret;
4677         } else {
4678                 num_bytes = 0;
4679         }
4680
4681         *qgroup_reserved = num_bytes;
4682
4683         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4684         rsv->space_info = __find_space_info(root->fs_info,
4685                                             BTRFS_BLOCK_GROUP_METADATA);
4686         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4687                                   BTRFS_RESERVE_FLUSH_ALL);
4688         if (ret) {
4689                 if (*qgroup_reserved)
4690                         btrfs_qgroup_free(root, *qgroup_reserved);
4691         }
4692
4693         return ret;
4694 }
4695
4696 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4697                                       struct btrfs_block_rsv *rsv,
4698                                       u64 qgroup_reserved)
4699 {
4700         btrfs_block_rsv_release(root, rsv, (u64)-1);
4701         if (qgroup_reserved)
4702                 btrfs_qgroup_free(root, qgroup_reserved);
4703 }
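
/*
 * Editorial example (not part of the original file): a snapshot ioctl
 * would pair the two helpers above roughly like this; the item count of
 * 8 is illustrative.
 *
 *	struct btrfs_block_rsv rsv;
 *	u64 qgroup_reserved;
 *	int ret;
 *
 *	btrfs_init_block_rsv(&rsv, BTRFS_BLOCK_RSV_TEMP);
 *	ret = btrfs_subvolume_reserve_metadata(root, &rsv, 8,
 *					       &qgroup_reserved);
 *	if (ret)
 *		return ret;
 *	... create the snapshot, consuming bytes from &rsv ...
 *	btrfs_subvolume_release_metadata(root, &rsv, qgroup_reserved);
 */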
4704
4705 /**
4706  * drop_outstanding_extent - drop an outstanding extent
4707  * @inode: the inode we're dropping the extent for
4708  *
4709  * This is called when we are freeing up an outstanding extent, either
4710  * after an error or after an extent is written.  This will return the number of
4711  * reserved extents that need to be freed.  This must be called with
4712  * BTRFS_I(inode)->lock held.
4713  */
4714 static unsigned drop_outstanding_extent(struct inode *inode)
4715 {
4716         unsigned drop_inode_space = 0;
4717         unsigned dropped_extents = 0;
4718
4719         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4720         BTRFS_I(inode)->outstanding_extents--;
4721
4722         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4723             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4724                                &BTRFS_I(inode)->runtime_flags))
4725                 drop_inode_space = 1;
4726
4727         /*
4728           * If we have at least as many outstanding extents as we have
4729           * reserved, then we need to leave the reserved extents count alone.
4730          */
4731         if (BTRFS_I(inode)->outstanding_extents >=
4732             BTRFS_I(inode)->reserved_extents)
4733                 return drop_inode_space;
4734
4735         dropped_extents = BTRFS_I(inode)->reserved_extents -
4736                 BTRFS_I(inode)->outstanding_extents;
4737         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4738         return dropped_extents + drop_inode_space;
4739 }
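
/*
 * Editorial worked example (not part of the original file): suppose an
 * inode has outstanding_extents == 3, reserved_extents == 5 and
 * BTRFS_INODE_DELALLOC_META_RESERVED set.  One call then does:
 *
 *	outstanding_extents: 3 -> 2	(still nonzero, so the inode-update
 *					 reservation bit stays set)
 *	reserved_extents:    5 -> 2	(dropped_extents == 3)
 *	return value:        3		(extent reservations to free)
 */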
4740
4741 /**
4742  * calc_csum_metadata_size - return the amount of metadata space that must be
4743  *      reserved/freed for the given bytes.
4744  * @inode: the inode we're manipulating
4745  * @num_bytes: the number of bytes in question
4746  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4747  *
4748  * This adjusts the number of csum_bytes in the inode and then returns the
4749  * correct amount of metadata that must either be reserved or freed.  We
4750  * calculate how many checksums we can fit into one leaf and then divide the
4751  * number of bytes that will need to be checksummed by this value to figure out
4752  * how many checksums will be required.  If we are adding bytes then the number
4753  * may go up and we will return the number of additional bytes that must be
4754  * reserved.  If it is going down we will return the number of bytes that must
4755  * be freed.
4756  *
4757  * This must be called with BTRFS_I(inode)->lock held.
4758  */
4759 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4760                                    int reserve)
4761 {
4762         struct btrfs_root *root = BTRFS_I(inode)->root;
4763         u64 csum_size;
4764         int num_csums_per_leaf;
4765         int num_csums;
4766         int old_csums;
4767
4768         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4769             BTRFS_I(inode)->csum_bytes == 0)
4770                 return 0;
4771
4772         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4773         if (reserve)
4774                 BTRFS_I(inode)->csum_bytes += num_bytes;
4775         else
4776                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4777         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4778         num_csums_per_leaf = (int)div64_u64(csum_size,
4779                                             sizeof(struct btrfs_csum_item) +
4780                                             sizeof(struct btrfs_disk_key));
4781         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4782         num_csums = num_csums + num_csums_per_leaf - 1;
4783         num_csums = num_csums / num_csums_per_leaf;
4784
4785         old_csums = old_csums + num_csums_per_leaf - 1;
4786         old_csums = old_csums / num_csums_per_leaf;
4787
4788         /* No change, no need to reserve more */
4789         if (old_csums == num_csums)
4790                 return 0;
4791
4792         if (reserve)
4793                 return btrfs_calc_trans_metadata_size(root,
4794                                                       num_csums - old_csums);
4795
4796         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4797 }
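
/*
 * Editorial worked example (not part of the original file): with a 4KiB
 * sectorsize, reserving 1MiB (reserve == 1) adds 256 sectors of
 * csum_bytes:
 *
 *	old_csums = csum_bytes / 4096
 *	num_csums = (csum_bytes + 1MiB) / 4096 == old_csums + 256
 *
 * Both counts are then rounded up to whole csum leaves; if that grows the
 * leaf count by one, the caller must reserve
 * btrfs_calc_trans_metadata_size(root, 1) more bytes, otherwise the
 * existing reservation already covers the new checksums and 0 is
 * returned.
 */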
4798
4799 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4800 {
4801         struct btrfs_root *root = BTRFS_I(inode)->root;
4802         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4803         u64 to_reserve = 0;
4804         u64 csum_bytes;
4805         unsigned nr_extents = 0;
4806         int extra_reserve = 0;
4807         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4808         int ret = 0;
4809         bool delalloc_lock = true;
4810         u64 to_free = 0;
4811         unsigned dropped;
4812
4813         /* If we are a free space inode we must not flush, since we will be in
4814          * the middle of a transaction commit.  We also don't need the delalloc
4815          * mutex since we won't race with anybody.  We need this mostly to make
4816          * lockdep shut its filthy mouth.
4817          */
4818         if (btrfs_is_free_space_inode(inode)) {
4819                 flush = BTRFS_RESERVE_NO_FLUSH;
4820                 delalloc_lock = false;
4821         }
4822
4823         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4824             btrfs_transaction_in_commit(root->fs_info))
4825                 schedule_timeout(1);
4826
4827         if (delalloc_lock)
4828                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4829
4830         num_bytes = ALIGN(num_bytes, root->sectorsize);
4831
4832         spin_lock(&BTRFS_I(inode)->lock);
4833         BTRFS_I(inode)->outstanding_extents++;
4834
4835         if (BTRFS_I(inode)->outstanding_extents >
4836             BTRFS_I(inode)->reserved_extents)
4837                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4838                         BTRFS_I(inode)->reserved_extents;
4839
4840         /*
4841          * Add an item to reserve for updating the inode when we complete the
4842          * delalloc io.
4843          */
4844         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4845                       &BTRFS_I(inode)->runtime_flags)) {
4846                 nr_extents++;
4847                 extra_reserve = 1;
4848         }
4849
4850         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4851         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4852         csum_bytes = BTRFS_I(inode)->csum_bytes;
4853         spin_unlock(&BTRFS_I(inode)->lock);
4854
4855         if (root->fs_info->quota_enabled) {
4856                 ret = btrfs_qgroup_reserve(root, num_bytes +
4857                                            nr_extents * root->leafsize);
4858                 if (ret)
4859                         goto out_fail;
4860         }
4861
4862         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4863         if (unlikely(ret)) {
4864                 if (root->fs_info->quota_enabled)
4865                         btrfs_qgroup_free(root, num_bytes +
4866                                                 nr_extents * root->leafsize);
4867                 goto out_fail;
4868         }
4869
4870         spin_lock(&BTRFS_I(inode)->lock);
4871         if (extra_reserve) {
4872                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4873                         &BTRFS_I(inode)->runtime_flags);
4874                 nr_extents--;
4875         }
4876         BTRFS_I(inode)->reserved_extents += nr_extents;
4877         spin_unlock(&BTRFS_I(inode)->lock);
4878
4879         if (delalloc_lock)
4880                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4881
4882         if (to_reserve)
4883                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4884                                               btrfs_ino(inode), to_reserve, 1);
4885         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4886
4887         return 0;
4888
4889 out_fail:
4890         spin_lock(&BTRFS_I(inode)->lock);
4891         dropped = drop_outstanding_extent(inode);
4892         /*
4893          * If the inode's csum_bytes is the same as the original
4894          * csum_bytes then we know we haven't raced with any freers,
4895          * so we can just reduce our inode's csum bytes and carry on.
4896          */
4897         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
4898                 calc_csum_metadata_size(inode, num_bytes, 0);
4899         } else {
4900                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
4901                 u64 bytes;
4902
4903                 /*
4904                  * This is tricky, but first we need to figure out how much we
4905                  * freed from any freers that occurred during this
4906                  * reservation, so we reset ->csum_bytes to the csum_bytes
4907                  * before we dropped our lock, and then call the free for the
4908                  * number of bytes that were freed while we were trying our
4909                  * reservation.
4910                  */
4911                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
4912                 BTRFS_I(inode)->csum_bytes = csum_bytes;
4913                 to_free = calc_csum_metadata_size(inode, bytes, 0);
4914
4916                 /*
4917                  * Now we need to see how much we would have freed had we not
4918                  * been making this reservation and our ->csum_bytes were not
4919                  * artificially inflated.
4920                  */
4921                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
4922                 bytes = csum_bytes - orig_csum_bytes;
4923                 bytes = calc_csum_metadata_size(inode, bytes, 0);
4924
4925                 /*
4926                  * Now reset ->csum_bytes to what it should be.  If bytes is
4927                  * more than to_free then we would have freed more space had we
4928                  * not had an artificially high ->csum_bytes, so we need to free
4929                  * the remainder.  If bytes is the same or less then we don't
4930                  * need to do anything, the other freers did the correct
4931                  * thing.
4932                  */
4933                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
4934                 if (bytes > to_free)
4935                         to_free = bytes - to_free;
4936                 else
4937                         to_free = 0;
4938         }
4939         spin_unlock(&BTRFS_I(inode)->lock);
4940         if (dropped)
4941                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4942
4943         if (to_free) {
4944                 btrfs_block_rsv_release(root, block_rsv, to_free);
4945                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4946                                               btrfs_ino(inode), to_free, 0);
4947         }
4948         if (delalloc_lock)
4949                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4950         return ret;
4951 }
4952
4953 /**
4954  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4955  * @inode: the inode to release the reservation for
4956  * @num_bytes: the number of bytes we're releasing
4957  *
4958  * This will release the metadata reservation for an inode.  This can be called
4959  * once we complete IO for a given set of bytes to release their metadata
4960  * reservations.
4961  */
4962 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4963 {
4964         struct btrfs_root *root = BTRFS_I(inode)->root;
4965         u64 to_free = 0;
4966         unsigned dropped;
4967
4968         num_bytes = ALIGN(num_bytes, root->sectorsize);
4969         spin_lock(&BTRFS_I(inode)->lock);
4970         dropped = drop_outstanding_extent(inode);
4971
4972         if (num_bytes)
4973                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4974         spin_unlock(&BTRFS_I(inode)->lock);
4975         if (dropped > 0)
4976                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4977
4978         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4979                                       btrfs_ino(inode), to_free, 0);
4980         if (root->fs_info->quota_enabled) {
4981                 btrfs_qgroup_free(root, num_bytes +
4982                                         dropped * root->leafsize);
4983         }
4984
4985         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4986                                 to_free);
4987 }
4988
4989 /**
4990  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4991  * @inode: inode we're writing to
4992  * @num_bytes: the number of bytes we want to allocate
4993  *
4994  * This will do the following things
4995  *
4996  * o reserve space in the data space info for num_bytes
4997  * o reserve space in the metadata space info based on number of outstanding
4998  *   extents and how much csums will be needed
4999  * o add to the inode's ->delalloc_bytes
5000  * o add it to the fs_info's delalloc inodes list.
5001  *
5002  * This will return 0 for success and -ENOSPC if there is no space left.
5003  */
5004 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5005 {
5006         int ret;
5007
5008         ret = btrfs_check_data_free_space(inode, num_bytes);
5009         if (ret)
5010                 return ret;
5011
5012         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5013         if (ret) {
5014                 btrfs_free_reserved_data_space(inode, num_bytes);
5015                 return ret;
5016         }
5017
5018         return 0;
5019 }
5020
5021 /**
5022  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5023  * @inode: inode we're releasing space for
5024  * @num_bytes: the number of bytes we want to free up
5025  *
5026  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5027  * called in the case that we don't need the metadata AND data reservations
5028  * anymore, e.g. if there is an error or we insert an inline extent.
5029  *
5030  * This function will release the metadata space that was not used and will
5031  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5032  * list if there are no delalloc bytes left.
5033  */
5034 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5035 {
5036         btrfs_delalloc_release_metadata(inode, num_bytes);
5037         btrfs_free_reserved_data_space(inode, num_bytes);
5038 }
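
/*
 * Editorial example (not part of the original file): the buffered write
 * path pairs these two helpers around page setup; prepare_pages() is a
 * hypothetical stand-in for dirtying the page range.
 *
 *	num_bytes = ALIGN(write_bytes, root->sectorsize);
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = prepare_pages(inode, pos, num_bytes);
 *	if (ret) {
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *		return ret;
 *	}
 *
 * On success the reservation is consumed as the delalloc IO completes,
 * which releases the metadata via btrfs_delalloc_release_metadata().
 */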
5039
5040 static int update_block_group(struct btrfs_root *root,
5041                               u64 bytenr, u64 num_bytes, int alloc)
5042 {
5043         struct btrfs_block_group_cache *cache = NULL;
5044         struct btrfs_fs_info *info = root->fs_info;
5045         u64 total = num_bytes;
5046         u64 old_val;
5047         u64 byte_in_group;
5048         int factor;
5049
5050         /* block accounting for super block */
5051         spin_lock(&info->delalloc_lock);
5052         old_val = btrfs_super_bytes_used(info->super_copy);
5053         if (alloc)
5054                 old_val += num_bytes;
5055         else
5056                 old_val -= num_bytes;
5057         btrfs_set_super_bytes_used(info->super_copy, old_val);
5058         spin_unlock(&info->delalloc_lock);
5059
5060         while (total) {
5061                 cache = btrfs_lookup_block_group(info, bytenr);
5062                 if (!cache)
5063                         return -ENOENT;
5064                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5065                                     BTRFS_BLOCK_GROUP_RAID1 |
5066                                     BTRFS_BLOCK_GROUP_RAID10))
5067                         factor = 2;
5068                 else
5069                         factor = 1;
5070                 /*
5071                  * If this block group has free space cache written out, we
5072                  * need to make sure to load it if we are removing space.  This
5073                  * is because we need the unpinning stage to actually add the
5074                  * space back to the block group, otherwise we will leak space.
5075                  */
5076                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5077                         cache_block_group(cache, 1);
5078
5079                 byte_in_group = bytenr - cache->key.objectid;
5080                 WARN_ON(byte_in_group > cache->key.offset);
5081
5082                 spin_lock(&cache->space_info->lock);
5083                 spin_lock(&cache->lock);
5084
5085                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5086                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5087                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5088
5089                 cache->dirty = 1;
5090                 old_val = btrfs_block_group_used(&cache->item);
5091                 num_bytes = min(total, cache->key.offset - byte_in_group);
5092                 if (alloc) {
5093                         old_val += num_bytes;
5094                         btrfs_set_block_group_used(&cache->item, old_val);
5095                         cache->reserved -= num_bytes;
5096                         cache->space_info->bytes_reserved -= num_bytes;
5097                         cache->space_info->bytes_used += num_bytes;
5098                         cache->space_info->disk_used += num_bytes * factor;
5099                         spin_unlock(&cache->lock);
5100                         spin_unlock(&cache->space_info->lock);
5101                 } else {
5102                         old_val -= num_bytes;
5103                         btrfs_set_block_group_used(&cache->item, old_val);
5104                         cache->pinned += num_bytes;
5105                         cache->space_info->bytes_pinned += num_bytes;
5106                         cache->space_info->bytes_used -= num_bytes;
5107                         cache->space_info->disk_used -= num_bytes * factor;
5108                         spin_unlock(&cache->lock);
5109                         spin_unlock(&cache->space_info->lock);
5110
5111                         set_extent_dirty(info->pinned_extents,
5112                                          bytenr, bytenr + num_bytes - 1,
5113                                          GFP_NOFS | __GFP_NOFAIL);
5114                 }
5115                 btrfs_put_block_group(cache);
5116                 total -= num_bytes;
5117                 bytenr += num_bytes;
5118         }
5119         return 0;
5120 }
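
/*
 * Editorial note (not part of the original file): the factor above
 * accounts for mirrored profiles.  Allocating 1MiB from a RAID1 (or DUP
 * or RAID10) block group moves the counters by:
 *
 *	space_info->bytes_used += 1MiB;
 *	space_info->disk_used  += 2MiB;		(factor == 2)
 *
 * while SINGLE, RAID0, RAID5 and RAID6 groups here use factor == 1.
 */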
5121
5122 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5123 {
5124         struct btrfs_block_group_cache *cache;
5125         u64 bytenr;
5126
5127         spin_lock(&root->fs_info->block_group_cache_lock);
5128         bytenr = root->fs_info->first_logical_byte;
5129         spin_unlock(&root->fs_info->block_group_cache_lock);
5130
5131         if (bytenr < (u64)-1)
5132                 return bytenr;
5133
5134         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5135         if (!cache)
5136                 return 0;
5137
5138         bytenr = cache->key.objectid;
5139         btrfs_put_block_group(cache);
5140
5141         return bytenr;
5142 }
5143
5144 static int pin_down_extent(struct btrfs_root *root,
5145                            struct btrfs_block_group_cache *cache,
5146                            u64 bytenr, u64 num_bytes, int reserved)
5147 {
5148         spin_lock(&cache->space_info->lock);
5149         spin_lock(&cache->lock);
5150         cache->pinned += num_bytes;
5151         cache->space_info->bytes_pinned += num_bytes;
5152         if (reserved) {
5153                 cache->reserved -= num_bytes;
5154                 cache->space_info->bytes_reserved -= num_bytes;
5155         }
5156         spin_unlock(&cache->lock);
5157         spin_unlock(&cache->space_info->lock);
5158
5159         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5160                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5161         return 0;
5162 }
5163
5164 /*
5165  * this function must be called within transaction
5166  */
5167 int btrfs_pin_extent(struct btrfs_root *root,
5168                      u64 bytenr, u64 num_bytes, int reserved)
5169 {
5170         struct btrfs_block_group_cache *cache;
5171
5172         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5173         BUG_ON(!cache); /* Logic error */
5174
5175         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5176
5177         btrfs_put_block_group(cache);
5178         return 0;
5179 }
5180
5181 /*
5182  * this function must be called within transaction
5183  */
5184 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5185                                     u64 bytenr, u64 num_bytes)
5186 {
5187         struct btrfs_block_group_cache *cache;
5188
5189         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5190         BUG_ON(!cache); /* Logic error */
5191
5192         /*
5193          * pull in the free space cache (if any) so that our pin
5194          * removes the free space from the cache.  We have load_only set
5195          * to one because the slow code to read in the free extents does check
5196          * the pinned extents.
5197          */
5198         cache_block_group(cache, 1);
5199
5200         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5201
5202         /* remove us from the free space cache (if we're there at all) */
5203         btrfs_remove_free_space(cache, bytenr, num_bytes);
5204         btrfs_put_block_group(cache);
5205         return 0;
5206 }
5207
5208 /**
5209  * btrfs_update_reserved_bytes - update the block_group and space info counters
5210  * @cache:      The cache we are manipulating
5211  * @num_bytes:  The number of bytes in question
5212  * @reserve:    One of the reservation enums
5213  *
5214  * This is called by the allocator when it reserves space, or by somebody who is
5215  * freeing space that was never actually used on disk.  For example if you
5216  * reserve some space for a new leaf in transaction A and before transaction A
5217  * commits you free that leaf, you call this with reserve set to 0 in order to
5218  * clear the reservation.
5219  *
5220  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5221  * ENOSPC accounting.  For data we handle the reservation through clearing the
5222  * delalloc bits in the io_tree.  We have to do this since we could end up
5223  * allocating less disk space for the amount of data we have reserved in the
5224  * case of compression.
5225  *
5226  * If this is a reservation and the block group has become read only we cannot
5227  * make the reservation and return -EAGAIN, otherwise this function always
5228  * succeeds.
5229  */
5230 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5231                                        u64 num_bytes, int reserve)
5232 {
5233         struct btrfs_space_info *space_info = cache->space_info;
5234         int ret = 0;
5235
5236         spin_lock(&space_info->lock);
5237         spin_lock(&cache->lock);
5238         if (reserve != RESERVE_FREE) {
5239                 if (cache->ro) {
5240                         ret = -EAGAIN;
5241                 } else {
5242                         cache->reserved += num_bytes;
5243                         space_info->bytes_reserved += num_bytes;
5244                         if (reserve == RESERVE_ALLOC) {
5245                                 trace_btrfs_space_reservation(cache->fs_info,
5246                                                 "space_info", space_info->flags,
5247                                                 num_bytes, 0);
5248                                 space_info->bytes_may_use -= num_bytes;
5249                         }
5250                 }
5251         } else {
5252                 if (cache->ro)
5253                         space_info->bytes_readonly += num_bytes;
5254                 cache->reserved -= num_bytes;
5255                 space_info->bytes_reserved -= num_bytes;
5256                 space_info->reservation_progress++;
5257         }
5258         spin_unlock(&cache->lock);
5259         spin_unlock(&space_info->lock);
5260         return ret;
5261 }
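
/*
 * Editorial example (not part of the original file): the round trip for
 * a metadata allocation that is freed again before the transaction
 * commits:
 *
 *	ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	if (ret == -EAGAIN)
 *		... the block group went read-only, pick another one ...
 *	... the new leaf is freed before the transaction commits ...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */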
5262
5263 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5264                                 struct btrfs_root *root)
5265 {
5266         struct btrfs_fs_info *fs_info = root->fs_info;
5267         struct btrfs_caching_control *next;
5268         struct btrfs_caching_control *caching_ctl;
5269         struct btrfs_block_group_cache *cache;
5270
5271         down_write(&fs_info->extent_commit_sem);
5272
5273         list_for_each_entry_safe(caching_ctl, next,
5274                                  &fs_info->caching_block_groups, list) {
5275                 cache = caching_ctl->block_group;
5276                 if (block_group_cache_done(cache)) {
5277                         cache->last_byte_to_unpin = (u64)-1;
5278                         list_del_init(&caching_ctl->list);
5279                         put_caching_control(caching_ctl);
5280                 } else {
5281                         cache->last_byte_to_unpin = caching_ctl->progress;
5282                 }
5283         }
5284
5285         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5286                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5287         else
5288                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5289
5290         up_write(&fs_info->extent_commit_sem);
5291
5292         update_global_block_rsv(fs_info);
5293 }
5294
5295 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5296 {
5297         struct btrfs_fs_info *fs_info = root->fs_info;
5298         struct btrfs_block_group_cache *cache = NULL;
5299         struct btrfs_space_info *space_info;
5300         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5301         u64 len;
5302         bool readonly;
5303
5304         while (start <= end) {
5305                 readonly = false;
5306                 if (!cache ||
5307                     start >= cache->key.objectid + cache->key.offset) {
5308                         if (cache)
5309                                 btrfs_put_block_group(cache);
5310                         cache = btrfs_lookup_block_group(fs_info, start);
5311                         BUG_ON(!cache); /* Logic error */
5312                 }
5313
5314                 len = cache->key.objectid + cache->key.offset - start;
5315                 len = min(len, end + 1 - start);
5316
5317                 if (start < cache->last_byte_to_unpin) {
5318                         len = min(len, cache->last_byte_to_unpin - start);
5319                         btrfs_add_free_space(cache, start, len);
5320                 }
5321
5322                 start += len;
5323                 space_info = cache->space_info;
5324
5325                 spin_lock(&space_info->lock);
5326                 spin_lock(&cache->lock);
5327                 cache->pinned -= len;
5328                 space_info->bytes_pinned -= len;
5329                 if (cache->ro) {
5330                         space_info->bytes_readonly += len;
5331                         readonly = true;
5332                 }
5333                 spin_unlock(&cache->lock);
5334                 if (!readonly && global_rsv->space_info == space_info) {
5335                         spin_lock(&global_rsv->lock);
5336                         if (!global_rsv->full) {
5337                                 len = min(len, global_rsv->size -
5338                                           global_rsv->reserved);
5339                                 global_rsv->reserved += len;
5340                                 space_info->bytes_may_use += len;
5341                                 if (global_rsv->reserved >= global_rsv->size)
5342                                         global_rsv->full = 1;
5343                         }
5344                         spin_unlock(&global_rsv->lock);
5345                 }
5346                 spin_unlock(&space_info->lock);
5347         }
5348
5349         if (cache)
5350                 btrfs_put_block_group(cache);
5351         return 0;
5352 }
5353
5354 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5355                                struct btrfs_root *root)
5356 {
5357         struct btrfs_fs_info *fs_info = root->fs_info;
5358         struct extent_io_tree *unpin;
5359         u64 start;
5360         u64 end;
5361         int ret;
5362
5363         if (trans->aborted)
5364                 return 0;
5365
5366         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5367                 unpin = &fs_info->freed_extents[1];
5368         else
5369                 unpin = &fs_info->freed_extents[0];
5370
5371         while (1) {
5372                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5373                                             EXTENT_DIRTY, NULL);
5374                 if (ret)
5375                         break;
5376
5377                 if (btrfs_test_opt(root, DISCARD))
5378                         ret = btrfs_discard_extent(root, start,
5379                                                    end + 1 - start, NULL);
5380
5381                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5382                 unpin_extent_range(root, start, end);
5383                 cond_resched();
5384         }
5385
5386         return 0;
5387 }
5388
5389 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5390                                 struct btrfs_root *root,
5391                                 u64 bytenr, u64 num_bytes, u64 parent,
5392                                 u64 root_objectid, u64 owner_objectid,
5393                                 u64 owner_offset, int refs_to_drop,
5394                                 struct btrfs_delayed_extent_op *extent_op)
5395 {
5396         struct btrfs_key key;
5397         struct btrfs_path *path;
5398         struct btrfs_fs_info *info = root->fs_info;
5399         struct btrfs_root *extent_root = info->extent_root;
5400         struct extent_buffer *leaf;
5401         struct btrfs_extent_item *ei;
5402         struct btrfs_extent_inline_ref *iref;
5403         int ret;
5404         int is_data;
5405         int extent_slot = 0;
5406         int found_extent = 0;
5407         int num_to_del = 1;
5408         u32 item_size;
5409         u64 refs;
5410         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5411                                                  SKINNY_METADATA);
5412
5413         path = btrfs_alloc_path();
5414         if (!path)
5415                 return -ENOMEM;
5416
5417         path->reada = 1;
5418         path->leave_spinning = 1;
5419
5420         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5421         BUG_ON(!is_data && refs_to_drop != 1);
5422
5423         if (is_data)
5424                 skinny_metadata = 0;
5425
5426         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5427                                     bytenr, num_bytes, parent,
5428                                     root_objectid, owner_objectid,
5429                                     owner_offset);
5430         if (ret == 0) {
5431                 extent_slot = path->slots[0];
5432                 while (extent_slot >= 0) {
5433                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5434                                               extent_slot);
5435                         if (key.objectid != bytenr)
5436                                 break;
5437                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5438                             key.offset == num_bytes) {
5439                                 found_extent = 1;
5440                                 break;
5441                         }
5442                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5443                             key.offset == owner_objectid) {
5444                                 found_extent = 1;
5445                                 break;
5446                         }
5447                         if (path->slots[0] - extent_slot > 5)
5448                                 break;
5449                         extent_slot--;
5450                 }
5451 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5452                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5453                 if (found_extent && item_size < sizeof(*ei))
5454                         found_extent = 0;
5455 #endif
5456                 if (!found_extent) {
5457                         BUG_ON(iref);
5458                         ret = remove_extent_backref(trans, extent_root, path,
5459                                                     NULL, refs_to_drop,
5460                                                     is_data);
5461                         if (ret) {
5462                                 btrfs_abort_transaction(trans, extent_root, ret);
5463                                 goto out;
5464                         }
5465                         btrfs_release_path(path);
5466                         path->leave_spinning = 1;
5467
5468                         key.objectid = bytenr;
5469                         key.type = BTRFS_EXTENT_ITEM_KEY;
5470                         key.offset = num_bytes;
5471
5472                         if (!is_data && skinny_metadata) {
5473                                 key.type = BTRFS_METADATA_ITEM_KEY;
5474                                 key.offset = owner_objectid;
5475                         }
5476
5477                         ret = btrfs_search_slot(trans, extent_root,
5478                                                 &key, path, -1, 1);
5479                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5480                                 /*
5481                                  * Couldn't find our skinny metadata item,
5482                                  * see if we have ye olde extent item.
5483                                  */
5484                                 path->slots[0]--;
5485                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5486                                                       path->slots[0]);
5487                                 if (key.objectid == bytenr &&
5488                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5489                                     key.offset == num_bytes)
5490                                         ret = 0;
5491                         }
5492
5493                         if (ret > 0 && skinny_metadata) {
5494                                 skinny_metadata = false;
5495                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5496                                 key.offset = num_bytes;
5497                                 btrfs_release_path(path);
5498                                 ret = btrfs_search_slot(trans, extent_root,
5499                                                         &key, path, -1, 1);
5500                         }
5501
5502                         if (ret) {
5503                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5504                                         ret, (unsigned long long)bytenr);
5505                                 if (ret > 0)
5506                                         btrfs_print_leaf(extent_root,
5507                                                          path->nodes[0]);
5508                         }
5509                         if (ret < 0) {
5510                                 btrfs_abort_transaction(trans, extent_root, ret);
5511                                 goto out;
5512                         }
5513                         extent_slot = path->slots[0];
5514                 }
5515         } else if (ret == -ENOENT) {
5516                 btrfs_print_leaf(extent_root, path->nodes[0]);
5517                 WARN_ON(1);
5518                 btrfs_err(info,
5519                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5520                         (unsigned long long)bytenr,
5521                         (unsigned long long)parent,
5522                         (unsigned long long)root_objectid,
5523                         (unsigned long long)owner_objectid,
5524                         (unsigned long long)owner_offset);
5525         } else {
5526                 btrfs_abort_transaction(trans, extent_root, ret);
5527                 goto out;
5528         }
5529
5530         leaf = path->nodes[0];
5531         item_size = btrfs_item_size_nr(leaf, extent_slot);
5532 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5533         if (item_size < sizeof(*ei)) {
5534                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5535                 ret = convert_extent_item_v0(trans, extent_root, path,
5536                                              owner_objectid, 0);
5537                 if (ret < 0) {
5538                         btrfs_abort_transaction(trans, extent_root, ret);
5539                         goto out;
5540                 }
5541
5542                 btrfs_release_path(path);
5543                 path->leave_spinning = 1;
5544
5545                 key.objectid = bytenr;
5546                 key.type = BTRFS_EXTENT_ITEM_KEY;
5547                 key.offset = num_bytes;
5548
5549                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5550                                         -1, 1);
5551                 if (ret) {
5552                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5553                                 ret, (unsigned long long)bytenr);
5554                         btrfs_print_leaf(extent_root, path->nodes[0]);
5555                 }
5556                 if (ret < 0) {
5557                         btrfs_abort_transaction(trans, extent_root, ret);
5558                         goto out;
5559                 }
5560
5561                 extent_slot = path->slots[0];
5562                 leaf = path->nodes[0];
5563                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5564         }
5565 #endif
5566         BUG_ON(item_size < sizeof(*ei));
5567         ei = btrfs_item_ptr(leaf, extent_slot,
5568                             struct btrfs_extent_item);
5569         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5570             key.type == BTRFS_EXTENT_ITEM_KEY) {
5571                 struct btrfs_tree_block_info *bi;
5572                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5573                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5574                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5575         }
5576
5577         refs = btrfs_extent_refs(leaf, ei);
5578         BUG_ON(refs < refs_to_drop);
5579         refs -= refs_to_drop;
5580
5581         if (refs > 0) {
5582                 if (extent_op)
5583                         __run_delayed_extent_op(extent_op, leaf, ei);
5584                 /*
5585                  * In the case of inline back ref, reference count will
5586                  * be updated by remove_extent_backref
5587                  */
5588                 if (iref) {
5589                         BUG_ON(!found_extent);
5590                 } else {
5591                         btrfs_set_extent_refs(leaf, ei, refs);
5592                         btrfs_mark_buffer_dirty(leaf);
5593                 }
5594                 if (found_extent) {
5595                         ret = remove_extent_backref(trans, extent_root, path,
5596                                                     iref, refs_to_drop,
5597                                                     is_data);
5598                         if (ret) {
5599                                 btrfs_abort_transaction(trans, extent_root, ret);
5600                                 goto out;
5601                         }
5602                 }
5603         } else {
5604                 if (found_extent) {
5605                         BUG_ON(is_data && refs_to_drop !=
5606                                extent_data_ref_count(root, path, iref));
5607                         if (iref) {
5608                                 BUG_ON(path->slots[0] != extent_slot);
5609                         } else {
5610                                 BUG_ON(path->slots[0] != extent_slot + 1);
5611                                 path->slots[0] = extent_slot;
5612                                 num_to_del = 2;
5613                         }
5614                 }
5615
5616                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5617                                       num_to_del);
5618                 if (ret) {
5619                         btrfs_abort_transaction(trans, extent_root, ret);
5620                         goto out;
5621                 }
5622                 btrfs_release_path(path);
5623
5624                 if (is_data) {
5625                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5626                         if (ret) {
5627                                 btrfs_abort_transaction(trans, extent_root, ret);
5628                                 goto out;
5629                         }
5630                 }
5631
5632                 ret = update_block_group(root, bytenr, num_bytes, 0);
5633                 if (ret) {
5634                         btrfs_abort_transaction(trans, extent_root, ret);
5635                         goto out;
5636                 }
5637         }
5638 out:
5639         btrfs_free_path(path);
5640         return ret;
5641 }
5642
5643 /*
5644  * when we free a block, it is possible (and likely) that we free the last
5645  * delayed ref for that extent as well.  This searches the delayed ref tree for
5646  * a given extent, and if there are no other delayed refs to be processed, it
5647  * removes it from the tree.
5648  */
5649 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5650                                       struct btrfs_root *root, u64 bytenr)
5651 {
5652         struct btrfs_delayed_ref_head *head;
5653         struct btrfs_delayed_ref_root *delayed_refs;
5654         struct btrfs_delayed_ref_node *ref;
5655         struct rb_node *node;
5656         int ret = 0;
5657
5658         delayed_refs = &trans->transaction->delayed_refs;
5659         spin_lock(&delayed_refs->lock);
5660         head = btrfs_find_delayed_ref_head(trans, bytenr);
5661         if (!head)
5662                 goto out;
5663
5664         node = rb_prev(&head->node.rb_node);
5665         if (!node)
5666                 goto out;
5667
5668         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5669
5670         /* there are still entries for this ref, we can't drop it */
5671         if (ref->bytenr == bytenr)
5672                 goto out;
5673
5674         if (head->extent_op) {
5675                 if (!head->must_insert_reserved)
5676                         goto out;
5677                 btrfs_free_delayed_extent_op(head->extent_op);
5678                 head->extent_op = NULL;
5679         }
5680
5681         /*
5682          * waiting for the lock here would deadlock.  If someone else has it
5683          * locked they are already in the process of dropping it anyway
5684          */
5685         if (!mutex_trylock(&head->mutex))
5686                 goto out;
5687
5688         /*
5689          * at this point we have a head with no other entries.  Go
5690          * ahead and process it.
5691          */
5692         head->node.in_tree = 0;
5693         rb_erase(&head->node.rb_node, &delayed_refs->root);
5694
5695         delayed_refs->num_entries--;
5696
5697         /*
5698          * we don't take a ref on the node because we're removing it from the
5699          * tree, so we just steal the ref the tree was holding.
5700          */
5701         delayed_refs->num_heads--;
5702         if (list_empty(&head->cluster))
5703                 delayed_refs->num_heads_ready--;
5704
5705         list_del_init(&head->cluster);
5706         spin_unlock(&delayed_refs->lock);
5707
5708         BUG_ON(head->extent_op);
5709         if (head->must_insert_reserved)
5710                 ret = 1;
5711
5712         mutex_unlock(&head->mutex);
5713         btrfs_put_delayed_ref(&head->node);
5714         return ret;
5715 out:
5716         spin_unlock(&delayed_refs->lock);
5717         return 0;
5718 }
5719
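/*
 * Free a tree block.  Everything except log tree blocks gets a delayed
 * ref drop queued.  On the last reference, a block that was allocated in
 * the running transaction and never written can go straight back to the
 * free space cache; a block that already hit disk must stay pinned until
 * the transaction commits.
 */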
5720 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5721                            struct btrfs_root *root,
5722                            struct extent_buffer *buf,
5723                            u64 parent, int last_ref)
5724 {
5725         struct btrfs_block_group_cache *cache = NULL;
5726         int ret;
5727
5728         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5729                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5730                                         buf->start, buf->len,
5731                                         parent, root->root_key.objectid,
5732                                         btrfs_header_level(buf),
5733                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5734                 BUG_ON(ret); /* -ENOMEM */
5735         }
5736
5737         if (!last_ref)
5738                 return;
5739
5740         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5741
5742         if (btrfs_header_generation(buf) == trans->transid) {
5743                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5744                         ret = check_ref_cleanup(trans, root, buf->start);
5745                         if (!ret)
5746                                 goto out;
5747                 }
5748
5749                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5750                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5751                         goto out;
5752                 }
5753
5754                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5755
5756                 btrfs_add_free_space(cache, buf->start, buf->len);
5757                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5758         }
5759 out:
5760         /*
5761          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5762          * anymore.
5763          */
5764         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5765         btrfs_put_block_group(cache);
5766 }
5767
5768 /* Can return -ENOMEM */
5769 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5770                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5771                       u64 owner, u64 offset, int for_cow)
5772 {
5773         int ret;
5774         struct btrfs_fs_info *fs_info = root->fs_info;
5775
5776         /*
5777          * tree log blocks never actually go into the extent allocation
5778          * tree, just update pinning info and exit early.
5779          */
5780         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5781                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5782                 /* unlocks the pinned mutex */
5783                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5784                 ret = 0;
5785         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5786                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5787                                         num_bytes,
5788                                         parent, root_objectid, (int)owner,
5789                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5790         } else {
5791                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5792                                                 num_bytes,
5793                                                 parent, root_objectid, owner,
5794                                                 offset, BTRFS_DROP_DELAYED_REF,
5795                                                 NULL, for_cow);
5796         }
5797         return ret;
5798 }
5799
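/* round the proposed allocation start up to the stripe alignment */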
5800 static u64 stripe_align(struct btrfs_root *root,
5801                         struct btrfs_block_group_cache *cache,
5802                         u64 val, u64 num_bytes)
5803 {
5804         u64 ret = ALIGN(val, root->stripesize);
5805         return ret;
5806 }
5807
5808 /*
5809  * when we wait for progress in the block group caching, it's because
5810  * our allocation attempt failed at least once.  So, we must sleep
5811  * and let some progress happen before we try again.
5812  *
5813  * This function will sleep at least once waiting for new free space to
5814  * show up, and then it will check the block group free space numbers
5815  * for our min num_bytes.  Another option is to have it go ahead
5816  * and look in the rbtree for a free extent of a given size, but this
5817  * is a good start.
5818  */
5819 static noinline int
5820 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5821                                 u64 num_bytes)
5822 {
5823         struct btrfs_caching_control *caching_ctl;
5824
5825         caching_ctl = get_caching_control(cache);
5826         if (!caching_ctl)
5827                 return 0;
5828
5829         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5830                    (cache->free_space_ctl->free_space >= num_bytes));
5831
5832         put_caching_control(caching_ctl);
5833         return 0;
5834 }
5835
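/* block until the caching kthread has finished scanning this group */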
5836 static noinline int
5837 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5838 {
5839         struct btrfs_caching_control *caching_ctl;
5840
5841         caching_ctl = get_caching_control(cache);
5842         if (!caching_ctl)
5843                 return 0;
5844
5845         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5846
5847         put_caching_control(caching_ctl);
5848         return 0;
5849 }
5850
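/*
 * map block group flags to the btrfs_raid_types index used for the
 * space_info->block_groups[] lists
 */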
5851 int __get_raid_index(u64 flags)
5852 {
5853         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5854                 return BTRFS_RAID_RAID10;
5855         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5856                 return BTRFS_RAID_RAID1;
5857         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5858                 return BTRFS_RAID_DUP;
5859         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5860                 return BTRFS_RAID_RAID0;
5861         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5862                 return BTRFS_RAID_RAID5;
5863         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5864                 return BTRFS_RAID_RAID6;
5865
5866         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5867 }
5868
5869 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5870 {
5871         return __get_raid_index(cache->flags);
5872 }
5873
5874 enum btrfs_loop_type {
5875         LOOP_CACHING_NOWAIT = 0,
5876         LOOP_CACHING_WAIT = 1,
5877         LOOP_ALLOC_CHUNK = 2,
5878         LOOP_NO_EMPTY_SIZE = 3,
5879 };
5880
5881 /*
5882  * walks the btree of allocated extents and finds a hole of a given size.
5883  * The key ins is changed to record the hole:
5884  * ins->objectid == block start
5885  * ins->type == BTRFS_EXTENT_ITEM_KEY
5886  * ins->offset == number of bytes
5887  * Any available blocks before search_start are skipped.
5888  */
5889 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5890                                      struct btrfs_root *orig_root,
5891                                      u64 num_bytes, u64 empty_size,
5892                                      u64 hint_byte, struct btrfs_key *ins,
5893                                      u64 data)
5894 {
5895         int ret = 0;
5896         struct btrfs_root *root = orig_root->fs_info->extent_root;
5897         struct btrfs_free_cluster *last_ptr = NULL;
5898         struct btrfs_block_group_cache *block_group = NULL;
5899         struct btrfs_block_group_cache *used_block_group;
5900         u64 search_start = 0;
5901         int empty_cluster = 2 * 1024 * 1024;
5902         struct btrfs_space_info *space_info;
5903         int loop = 0;
5904         int index = __get_raid_index(data);
5905         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5906                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5907         bool found_uncached_bg = false;
5908         bool failed_cluster_refill = false;
5909         bool failed_alloc = false;
5910         bool use_cluster = true;
5911         bool have_caching_bg = false;
5912
5913         WARN_ON(num_bytes < root->sectorsize);
5914         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5915         ins->objectid = 0;
5916         ins->offset = 0;
5917
5918         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5919
5920         space_info = __find_space_info(root->fs_info, data);
5921         if (!space_info) {
5922                 btrfs_err(root->fs_info, "No space info for %llu", data);
5923                 return -ENOSPC;
5924         }
5925
5926         /*
5927          * If the space info is for both data and metadata it means we have a
5928          * small filesystem and we can't use the clustering stuff.
5929          */
5930         if (btrfs_mixed_space_info(space_info))
5931                 use_cluster = false;
5932
5933         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5934                 last_ptr = &root->fs_info->meta_alloc_cluster;
5935                 if (!btrfs_test_opt(root, SSD))
5936                         empty_cluster = 64 * 1024;
5937         }
5938
5939         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5940             btrfs_test_opt(root, SSD)) {
5941                 last_ptr = &root->fs_info->data_alloc_cluster;
5942         }
5943
5944         if (last_ptr) {
5945                 spin_lock(&last_ptr->lock);
5946                 if (last_ptr->block_group)
5947                         hint_byte = last_ptr->window_start;
5948                 spin_unlock(&last_ptr->lock);
5949         }
5950
5951         search_start = max(search_start, first_logical_byte(root, 0));
5952         search_start = max(search_start, hint_byte);
5953
5954         if (!last_ptr)
5955                 empty_cluster = 0;
5956
5957         if (search_start == hint_byte) {
5958                 block_group = btrfs_lookup_block_group(root->fs_info,
5959                                                        search_start);
5960                 used_block_group = block_group;
5961                 /*
5962                  * we don't want to use the block group if it doesn't match our
5963                  * allocation bits, or if it's not cached.
5964                  *
5965                  * However if we are re-searching with an ideal block group
5966                  * picked out then we don't care that the block group is cached.
5967                  */
5968                 if (block_group && block_group_bits(block_group, data) &&
5969                     block_group->cached != BTRFS_CACHE_NO) {
5970                         down_read(&space_info->groups_sem);
5971                         if (list_empty(&block_group->list) ||
5972                             block_group->ro) {
5973                                 /*
5974                                  * someone is removing this block group,
5975                                  * we can't jump into the have_block_group
5976                  * label because our list pointers are not
5977                                  * valid
5978                                  */
5979                                 btrfs_put_block_group(block_group);
5980                                 up_read(&space_info->groups_sem);
5981                         } else {
5982                                 index = get_block_group_index(block_group);
5983                                 goto have_block_group;
5984                         }
5985                 } else if (block_group) {
5986                         btrfs_put_block_group(block_group);
5987                 }
5988         }
5989 search:
5990         have_caching_bg = false;
5991         down_read(&space_info->groups_sem);
5992         list_for_each_entry(block_group, &space_info->block_groups[index],
5993                             list) {
5994                 u64 offset;
5995                 int cached;
5996
5997                 used_block_group = block_group;
5998                 btrfs_get_block_group(block_group);
5999                 search_start = block_group->key.objectid;
6000
6001                 /*
6002                  * this can happen if we end up cycling through all the
6003                  * raid types, but we want to make sure we only allocate
6004                  * for the proper type.
6005                  */
6006                 if (!block_group_bits(block_group, data)) {
6007                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6008                                     BTRFS_BLOCK_GROUP_RAID1 |
6009                                     BTRFS_BLOCK_GROUP_RAID5 |
6010                                     BTRFS_BLOCK_GROUP_RAID6 |
6011                                     BTRFS_BLOCK_GROUP_RAID10;
6012
6013                         /*
6014                          * if they asked for extra copies and this block group
6015                          * doesn't provide them, bail.  This does allow us to
6016                          * fill raid0 from raid1.
6017                          */
6018                         if ((data & extra) && !(block_group->flags & extra))
6019                                 goto loop;
6020                 }
6021
6022 have_block_group:
6023                 cached = block_group_cache_done(block_group);
6024                 if (unlikely(!cached)) {
6025                         found_uncached_bg = true;
6026                         ret = cache_block_group(block_group, 0);
6027                         BUG_ON(ret < 0);
6028                         ret = 0;
6029                 }
6030
6031                 if (unlikely(block_group->ro))
6032                         goto loop;
6033
6034                 /*
6035                  * Ok, we want to try to use the cluster allocator, so
6036                  * let's look there.
6037                  */
6038                 if (last_ptr) {
6039                         unsigned long aligned_cluster;
6040                         /*
6041                          * the refill lock keeps out other
6042                          * people trying to start a new cluster
6043                          */
6044                         spin_lock(&last_ptr->refill_lock);
6045                         used_block_group = last_ptr->block_group;
6046                         if (used_block_group != block_group &&
6047                             (!used_block_group ||
6048                              used_block_group->ro ||
6049                              !block_group_bits(used_block_group, data))) {
6050                                 used_block_group = block_group;
6051                                 goto refill_cluster;
6052                         }
6053
6054                         if (used_block_group != block_group)
6055                                 btrfs_get_block_group(used_block_group);
6056
6057                         offset = btrfs_alloc_from_cluster(used_block_group,
6058                           last_ptr, num_bytes, used_block_group->key.objectid);
6059                         if (offset) {
6060                                 /* we have a block, we're done */
6061                                 spin_unlock(&last_ptr->refill_lock);
6062                                 trace_btrfs_reserve_extent_cluster(root,
6063                                         block_group, search_start, num_bytes);
6064                                 goto checks;
6065                         }
6066
6067                         WARN_ON(last_ptr->block_group != used_block_group);
6068                         if (used_block_group != block_group) {
6069                                 btrfs_put_block_group(used_block_group);
6070                                 used_block_group = block_group;
6071                         }
6072 refill_cluster:
6073                         BUG_ON(used_block_group != block_group);
6074                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6075                          * set up a new cluster, so let's just skip it
6076                          * and let the allocator find whatever block
6077                          * it can find.  If we reach this point, we
6078                          * will have tried the cluster allocator
6079                          * plenty of times and not have found
6080                          * anything, so we are likely way too
6081                          * fragmented for the clustering stuff to find
6082                          * anything.
6083                          *
6084                          * However, if the cluster is taken from the
6085                          * current block group, release the cluster
6086                          * first, so that we stand a better chance of
6087                          * succeeding in the unclustered
6088                          * allocation.  */
6089                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6090                             last_ptr->block_group != block_group) {
6091                                 spin_unlock(&last_ptr->refill_lock);
6092                                 goto unclustered_alloc;
6093                         }
6094
6095                         /*
6096                          * this cluster didn't work out, free it and
6097                          * start over
6098                          */
6099                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6100
6101                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6102                                 spin_unlock(&last_ptr->refill_lock);
6103                                 goto unclustered_alloc;
6104                         }
6105
6106                         aligned_cluster = max_t(unsigned long,
6107                                                 empty_cluster + empty_size,
6108                                               block_group->full_stripe_len);
6109
6110                         /* allocate a cluster in this block group */
6111                         ret = btrfs_find_space_cluster(trans, root,
6112                                                block_group, last_ptr,
6113                                                search_start, num_bytes,
6114                                                aligned_cluster);
6115                         if (ret == 0) {
6116                                 /*
6117                                  * now pull our allocation out of this
6118                                  * cluster
6119                                  */
6120                                 offset = btrfs_alloc_from_cluster(block_group,
6121                                                   last_ptr, num_bytes,
6122                                                   search_start);
6123                                 if (offset) {
6124                                         /* we found one, proceed */
6125                                         spin_unlock(&last_ptr->refill_lock);
6126                                         trace_btrfs_reserve_extent_cluster(root,
6127                                                 block_group, search_start,
6128                                                 num_bytes);
6129                                         goto checks;
6130                                 }
6131                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
6132                                    !failed_cluster_refill) {
6133                                 spin_unlock(&last_ptr->refill_lock);
6134
6135                                 failed_cluster_refill = true;
6136                                 wait_block_group_cache_progress(block_group,
6137                                        num_bytes + empty_cluster + empty_size);
6138                                 goto have_block_group;
6139                         }
6140
6141                         /*
6142                          * at this point we either didn't find a cluster
6143                          * or we weren't able to allocate a block from our
6144                          * cluster.  Free the cluster we've been trying
6145                          * to use, and go to the next block group
6146                          */
6147                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6148                         spin_unlock(&last_ptr->refill_lock);
6149                         goto loop;
6150                 }
6151
6152 unclustered_alloc:
6153                 spin_lock(&block_group->free_space_ctl->tree_lock);
6154                 if (cached &&
6155                     block_group->free_space_ctl->free_space <
6156                     num_bytes + empty_cluster + empty_size) {
6157                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6158                         goto loop;
6159                 }
6160                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6161
6162                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6163                                                     num_bytes, empty_size);
6164                 /*
6165                  * If we didn't find a chunk, and we haven't failed on this
6166                  * block group before, and this block group is in the middle of
6167                  * caching and we are ok with waiting, then go ahead and wait
6168                  * for progress to be made, and set failed_alloc to true.
6169                  *
6170                  * If failed_alloc is true then we've already waited on this
6171                  * block group once and should move on to the next block group.
6172                  */
6173                 if (!offset && !failed_alloc && !cached &&
6174                     loop > LOOP_CACHING_NOWAIT) {
6175                         wait_block_group_cache_progress(block_group,
6176                                                 num_bytes + empty_size);
6177                         failed_alloc = true;
6178                         goto have_block_group;
6179                 } else if (!offset) {
6180                         if (!cached)
6181                                 have_caching_bg = true;
6182                         goto loop;
6183                 }
6184 checks:
6185                 search_start = stripe_align(root, used_block_group,
6186                                             offset, num_bytes);
6187
6188                 /* move on to the next group */
6189                 if (search_start + num_bytes >
6190                     used_block_group->key.objectid + used_block_group->key.offset) {
6191                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6192                         goto loop;
6193                 }
6194
6195                 if (offset < search_start)
6196                         btrfs_add_free_space(used_block_group, offset,
6197                                              search_start - offset);
6198                 BUG_ON(offset > search_start);
6199
6200                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6201                                                   alloc_type);
6202                 if (ret == -EAGAIN) {
6203                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6204                         goto loop;
6205                 }
6206
6207                 /* we are all good, let's return */
6208                 ins->objectid = search_start;
6209                 ins->offset = num_bytes;
6210
6211                 trace_btrfs_reserve_extent(orig_root, block_group,
6212                                            search_start, num_bytes);
6213                 if (used_block_group != block_group)
6214                         btrfs_put_block_group(used_block_group);
6215                 btrfs_put_block_group(block_group);
6216                 break;
6217 loop:
6218                 failed_cluster_refill = false;
6219                 failed_alloc = false;
6220                 BUG_ON(index != get_block_group_index(block_group));
6221                 if (used_block_group != block_group)
6222                         btrfs_put_block_group(used_block_group);
6223                 btrfs_put_block_group(block_group);
6224         }
6225         up_read(&space_info->groups_sem);
6226
6227         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6228                 goto search;
6229
6230         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6231                 goto search;
6232
6233         /*
6234          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6235          *                      caching kthreads as we move along
6236          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6237          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6238          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6239          *                      again
6240          */
6241         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6242                 index = 0;
6243                 loop++;
6244                 if (loop == LOOP_ALLOC_CHUNK) {
6245                         ret = do_chunk_alloc(trans, root, data,
6246                                              CHUNK_ALLOC_FORCE);
6247                         /*
6248                          * Do not bail out on ENOSPC since we
6249                          * can do more things.
6250                          */
6251                         if (ret < 0 && ret != -ENOSPC) {
6252                                 btrfs_abort_transaction(trans,
6253                                                         root, ret);
6254                                 goto out;
6255                         }
6256                 }
6257
6258                 if (loop == LOOP_NO_EMPTY_SIZE) {
6259                         empty_size = 0;
6260                         empty_cluster = 0;
6261                 }
6262
6263                 goto search;
6264         } else if (!ins->objectid) {
6265                 ret = -ENOSPC;
6266         } else {
6267                 ret = 0;
6268         }
6269 out:
6270
6271         return ret;
6272 }
6273
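/*
 * ENOSPC debugging helper: print the usage counters of a space_info and,
 * if dump_block_groups is set, the per block group numbers and free
 * space entries as well.
 */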
6274 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6275                             int dump_block_groups)
6276 {
6277         struct btrfs_block_group_cache *cache;
6278         int index = 0;
6279
6280         spin_lock(&info->lock);
6281         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6282                (unsigned long long)info->flags,
6283                (unsigned long long)(info->total_bytes - info->bytes_used -
6284                                     info->bytes_pinned - info->bytes_reserved -
6285                                     info->bytes_readonly),
6286                (info->full) ? "" : "not ");
6287         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6288                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6289                (unsigned long long)info->total_bytes,
6290                (unsigned long long)info->bytes_used,
6291                (unsigned long long)info->bytes_pinned,
6292                (unsigned long long)info->bytes_reserved,
6293                (unsigned long long)info->bytes_may_use,
6294                (unsigned long long)info->bytes_readonly);
6295         spin_unlock(&info->lock);
6296
6297         if (!dump_block_groups)
6298                 return;
6299
6300         down_read(&info->groups_sem);
6301 again:
6302         list_for_each_entry(cache, &info->block_groups[index], list) {
6303                 spin_lock(&cache->lock);
6304                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6305                        (unsigned long long)cache->key.objectid,
6306                        (unsigned long long)cache->key.offset,
6307                        (unsigned long long)btrfs_block_group_used(&cache->item),
6308                        (unsigned long long)cache->pinned,
6309                        (unsigned long long)cache->reserved,
6310                        cache->ro ? "[readonly]" : "");
6311                 btrfs_dump_free_space(cache, bytes);
6312                 spin_unlock(&cache->lock);
6313         }
6314         if (++index < BTRFS_NR_RAID_TYPES)
6315                 goto again;
6316         up_read(&info->groups_sem);
6317 }
6318
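/*
 * Reserve an extent, retrying on ENOSPC with the request halved (rounded
 * down to a sector boundary) until it reaches min_alloc_size.  Only when
 * that final size fails too do we give up and return -ENOSPC.
 */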
6319 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6320                          struct btrfs_root *root,
6321                          u64 num_bytes, u64 min_alloc_size,
6322                          u64 empty_size, u64 hint_byte,
6323                          struct btrfs_key *ins, u64 data)
6324 {
6325         bool final_tried = false;
6326         int ret;
6327
6328         data = btrfs_get_alloc_profile(root, data);
6329 again:
6330         WARN_ON(num_bytes < root->sectorsize);
6331         ret = find_free_extent(trans, root, num_bytes, empty_size,
6332                                hint_byte, ins, data);
6333
6334         if (ret == -ENOSPC) {
6335                 if (!final_tried) {
6336                         num_bytes = num_bytes >> 1;
6337                         num_bytes = round_down(num_bytes, root->sectorsize);
6338                         num_bytes = max(num_bytes, min_alloc_size);
6339                         if (num_bytes == min_alloc_size)
6340                                 final_tried = true;
6341                         goto again;
6342                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6343                         struct btrfs_space_info *sinfo;
6344
6345                         sinfo = __find_space_info(root->fs_info, data);
6346                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6347                                 (unsigned long long)data,
6348                                 (unsigned long long)num_bytes);
6349                         if (sinfo)
6350                                 dump_space_info(sinfo, num_bytes, 1);
6351                 }
6352         }
6353
6354         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6355
6356         return ret;
6357 }
6358
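/*
 * Return a reserved but unused extent to the allocator.  With pin set
 * the range stays pinned until the transaction commits; otherwise it
 * goes straight back into the free space cache.
 */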
6359 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6360                                         u64 start, u64 len, int pin)
6361 {
6362         struct btrfs_block_group_cache *cache;
6363         int ret = 0;
6364
6365         cache = btrfs_lookup_block_group(root->fs_info, start);
6366         if (!cache) {
6367                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6368                         (unsigned long long)start);
6369                 return -ENOSPC;
6370         }
6371
6372         if (btrfs_test_opt(root, DISCARD))
6373                 ret = btrfs_discard_extent(root, start, len, NULL);
6374
6375         if (pin)
6376                 pin_down_extent(root, cache, start, len, 1);
6377         else {
6378                 btrfs_add_free_space(cache, start, len);
6379                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6380         }
6381         btrfs_put_block_group(cache);
6382
6383         trace_btrfs_reserved_extent_free(root, start, len);
6384
6385         return ret;
6386 }
6387
6388 int btrfs_free_reserved_extent(struct btrfs_root *root,
6389                                         u64 start, u64 len)
6390 {
6391         return __btrfs_free_reserved_extent(root, start, len, 0);
6392 }
6393
6394 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6395                                        u64 start, u64 len)
6396 {
6397         return __btrfs_free_reserved_extent(root, start, len, 1);
6398 }
6399
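/*
 * Insert the extent item for a newly allocated file extent, embedding an
 * inline shared or normal data backref depending on whether a parent
 * block was given, then update the block group accounting.
 */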
6400 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6401                                       struct btrfs_root *root,
6402                                       u64 parent, u64 root_objectid,
6403                                       u64 flags, u64 owner, u64 offset,
6404                                       struct btrfs_key *ins, int ref_mod)
6405 {
6406         int ret;
6407         struct btrfs_fs_info *fs_info = root->fs_info;
6408         struct btrfs_extent_item *extent_item;
6409         struct btrfs_extent_inline_ref *iref;
6410         struct btrfs_path *path;
6411         struct extent_buffer *leaf;
6412         int type;
6413         u32 size;
6414
6415         if (parent > 0)
6416                 type = BTRFS_SHARED_DATA_REF_KEY;
6417         else
6418                 type = BTRFS_EXTENT_DATA_REF_KEY;
6419
6420         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6421
6422         path = btrfs_alloc_path();
6423         if (!path)
6424                 return -ENOMEM;
6425
6426         path->leave_spinning = 1;
6427         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6428                                       ins, size);
6429         if (ret) {
6430                 btrfs_free_path(path);
6431                 return ret;
6432         }
6433
6434         leaf = path->nodes[0];
6435         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6436                                      struct btrfs_extent_item);
6437         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6438         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6439         btrfs_set_extent_flags(leaf, extent_item,
6440                                flags | BTRFS_EXTENT_FLAG_DATA);
6441
6442         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6443         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6444         if (parent > 0) {
6445                 struct btrfs_shared_data_ref *ref;
6446                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6447                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6448                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6449         } else {
6450                 struct btrfs_extent_data_ref *ref;
6451                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6452                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6453                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6454                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6455                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6456         }
6457
6458         btrfs_mark_buffer_dirty(path->nodes[0]);
6459         btrfs_free_path(path);
6460
6461         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6462         if (ret) { /* -ENOENT, logic error */
6463                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6464                         (unsigned long long)ins->objectid,
6465                         (unsigned long long)ins->offset);
6466                 BUG();
6467         }
6468         return ret;
6469 }
6470
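/*
 * Insert the extent item for a newly allocated tree block.  With the
 * skinny-metadata incompat feature the key itself encodes the level, so
 * the btrfs_tree_block_info is omitted and the inline backref directly
 * follows the extent item.
 */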
6471 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6472                                      struct btrfs_root *root,
6473                                      u64 parent, u64 root_objectid,
6474                                      u64 flags, struct btrfs_disk_key *key,
6475                                      int level, struct btrfs_key *ins)
6476 {
6477         int ret;
6478         struct btrfs_fs_info *fs_info = root->fs_info;
6479         struct btrfs_extent_item *extent_item;
6480         struct btrfs_tree_block_info *block_info;
6481         struct btrfs_extent_inline_ref *iref;
6482         struct btrfs_path *path;
6483         struct extent_buffer *leaf;
6484         u32 size = sizeof(*extent_item) + sizeof(*iref);
6485         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6486                                                  SKINNY_METADATA);
6487
6488         if (!skinny_metadata)
6489                 size += sizeof(*block_info);
6490
6491         path = btrfs_alloc_path();
6492         if (!path)
6493                 return -ENOMEM;
6494
6495         path->leave_spinning = 1;
6496         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6497                                       ins, size);
6498         if (ret) {
6499                 btrfs_free_path(path);
6500                 return ret;
6501         }
6502
6503         leaf = path->nodes[0];
6504         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6505                                      struct btrfs_extent_item);
6506         btrfs_set_extent_refs(leaf, extent_item, 1);
6507         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6508         btrfs_set_extent_flags(leaf, extent_item,
6509                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6510
6511         if (skinny_metadata) {
6512                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6513         } else {
6514                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6515                 btrfs_set_tree_block_key(leaf, block_info, key);
6516                 btrfs_set_tree_block_level(leaf, block_info, level);
6517                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6518         }
6519
6520         if (parent > 0) {
6521                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6522                 btrfs_set_extent_inline_ref_type(leaf, iref,
6523                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6524                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6525         } else {
6526                 btrfs_set_extent_inline_ref_type(leaf, iref,
6527                                                  BTRFS_TREE_BLOCK_REF_KEY);
6528                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6529         }
6530
6531         btrfs_mark_buffer_dirty(leaf);
6532         btrfs_free_path(path);
6533
6534         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6535         if (ret) { /* -ENOENT, logic error */
6536                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6537                         (unsigned long long)ins->objectid,
6538                         (unsigned long long)ins->offset);
6539                 BUG();
6540         }
6541         return ret;
6542 }
6543
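/*
 * Record a newly allocated file extent as a delayed ref; the extent item
 * itself is inserted later when the delayed refs are run.
 */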
6544 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6545                                      struct btrfs_root *root,
6546                                      u64 root_objectid, u64 owner,
6547                                      u64 offset, struct btrfs_key *ins)
6548 {
6549         int ret;
6550
6551         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6552
6553         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6554                                          ins->offset, 0,
6555                                          root_objectid, owner, offset,
6556                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6557         return ret;
6558 }
6559
6560 /*
6561  * this is used by the tree logging recovery code.  It records that
6562  * an extent has been allocated and makes sure to clear the free
6563  * space cache bits as well
6564  */
6565 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6566                                    struct btrfs_root *root,
6567                                    u64 root_objectid, u64 owner, u64 offset,
6568                                    struct btrfs_key *ins)
6569 {
6570         int ret;
6571         struct btrfs_block_group_cache *block_group;
6572         struct btrfs_caching_control *caching_ctl;
6573         u64 start = ins->objectid;
6574         u64 num_bytes = ins->offset;
6575
6576         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6577         cache_block_group(block_group, 0);
6578         caching_ctl = get_caching_control(block_group);
6579
6580         if (!caching_ctl) {
6581                 BUG_ON(!block_group_cache_done(block_group));
6582                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6583                 BUG_ON(ret); /* -ENOMEM */
6584         } else {
6585                 mutex_lock(&caching_ctl->mutex);
6586
6587                 if (start >= caching_ctl->progress) {
6588                         ret = add_excluded_extent(root, start, num_bytes);
6589                         BUG_ON(ret); /* -ENOMEM */
6590                 } else if (start + num_bytes <= caching_ctl->progress) {
6591                         ret = btrfs_remove_free_space(block_group,
6592                                                       start, num_bytes);
6593                         BUG_ON(ret); /* -ENOMEM */
6594                 } else {
6595                         num_bytes = caching_ctl->progress - start;
6596                         ret = btrfs_remove_free_space(block_group,
6597                                                       start, num_bytes);
6598                         BUG_ON(ret); /* -ENOMEM */
6599
6600                         start = caching_ctl->progress;
6601                         num_bytes = ins->objectid + ins->offset -
6602                                     caching_ctl->progress;
6603                         ret = add_excluded_extent(root, start, num_bytes);
6604                         BUG_ON(ret); /* -ENOMEM */
6605                 }
6606
6607                 mutex_unlock(&caching_ctl->mutex);
6608                 put_caching_control(caching_ctl);
6609         }
6610
6611         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6612                                           RESERVE_ALLOC_NO_ACCOUNT);
6613         BUG_ON(ret); /* logic error */
6614         btrfs_put_block_group(block_group);
6615         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6616                                          0, owner, offset, ins, 1);
6617         return ret;
6618 }
6619
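/*
 * Set up the extent buffer for a freshly allocated tree block: stamp the
 * transid, lock the buffer, wipe any stale content and mark it dirty in
 * the appropriate extent io tree.
 */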
6620 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6621                                             struct btrfs_root *root,
6622                                             u64 bytenr, u32 blocksize,
6623                                             int level)
6624 {
6625         struct extent_buffer *buf;
6626
6627         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6628         if (!buf)
6629                 return ERR_PTR(-ENOMEM);
6630         btrfs_set_header_generation(buf, trans->transid);
6631         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6632         btrfs_tree_lock(buf);
6633         clean_tree_block(trans, root, buf);
6634         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6635
6636         btrfs_set_lock_blocking(buf);
6637         btrfs_set_buffer_uptodate(buf);
6638
6639         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6640                 /*
6641                  * we allow two log transactions at a time, use different
6642                  * EXTENT bits to differentiate dirty pages.
6643                  */
6644                 if (root->log_transid % 2 == 0)
6645                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6646                                         buf->start + buf->len - 1, GFP_NOFS);
6647                 else
6648                         set_extent_new(&root->dirty_log_pages, buf->start,
6649                                         buf->start + buf->len - 1, GFP_NOFS);
6650         } else {
6651                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6652                          buf->start + buf->len - 1, GFP_NOFS);
6653         }
6654         trans->blocks_used++;
6655         /* this returns a buffer locked for blocking */
6656         return buf;
6657 }
6658
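     /*
      * pick the block reserve that should pay for a new tree block of the
      * given size.  if the chosen reserve is empty or can't cover it, try to
      * refill it without flushing, falling back to stealing the bytes from
      * the global reserve.  returns the reserve to charge or an ERR_PTR.
      */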
6659 static struct btrfs_block_rsv *
6660 use_block_rsv(struct btrfs_trans_handle *trans,
6661               struct btrfs_root *root, u32 blocksize)
6662 {
6663         struct btrfs_block_rsv *block_rsv;
6664         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6665         int ret;
6666
6667         block_rsv = get_block_rsv(trans, root);
6668
6669         if (block_rsv->size == 0) {
6670                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6671                                              BTRFS_RESERVE_NO_FLUSH);
6672                 /*
6673                  * If we couldn't reserve metadata bytes try and use some from
6674                  * the global reserve.
6675                  */
6676                 if (ret && block_rsv != global_rsv) {
6677                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6678                         if (!ret)
6679                                 return global_rsv;
6680                         return ERR_PTR(ret);
6681                 } else if (ret) {
6682                         return ERR_PTR(ret);
6683                 }
6684                 return block_rsv;
6685         }
6686
6687         ret = block_rsv_use_bytes(block_rsv, blocksize);
6688         if (!ret)
6689                 return block_rsv;
6690         if (ret && !block_rsv->failfast) {
6691                 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6692                         static DEFINE_RATELIMIT_STATE(_rs,
6693                                         DEFAULT_RATELIMIT_INTERVAL * 10,
6694                                         /*DEFAULT_RATELIMIT_BURST*/ 1);
6695                         if (__ratelimit(&_rs))
6696                                 WARN(1, KERN_DEBUG
6697                                         "btrfs: block rsv returned %d\n", ret);
6698                 }
6699                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6700                                              BTRFS_RESERVE_NO_FLUSH);
6701                 if (!ret) {
6702                         return block_rsv;
6703                 } else if (ret && block_rsv != global_rsv) {
6704                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6705                         if (!ret)
6706                                 return global_rsv;
6707                 }
6708         }
6709
6710         return ERR_PTR(-ENOSPC);
6711 }
6712
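     /*
      * return blocksize bytes to the reserve when a tree block allocation
      * is backed out.
      */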
6713 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6714                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6715 {
6716         block_rsv_add_bytes(block_rsv, blocksize, 0);
6717         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6718 }
6719
6720 /*
6721  * finds a free extent and does all the dirty work required for allocation.
6722  * returns the key for the extent through ins, and a locked tree buffer
6723  * for the first block of the extent.
6724  *
6725  * returns the tree buffer or an ERR_PTR on failure.
6726  */
6727 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6728                                         struct btrfs_root *root, u32 blocksize,
6729                                         u64 parent, u64 root_objectid,
6730                                         struct btrfs_disk_key *key, int level,
6731                                         u64 hint, u64 empty_size)
6732 {
6733         struct btrfs_key ins;
6734         struct btrfs_block_rsv *block_rsv;
6735         struct extent_buffer *buf;
6736         u64 flags = 0;
6737         int ret;
6738         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6739                                                  SKINNY_METADATA);
6740
6741         block_rsv = use_block_rsv(trans, root, blocksize);
6742         if (IS_ERR(block_rsv))
6743                 return ERR_CAST(block_rsv);
6744
6745         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6746                                    empty_size, hint, &ins, 0);
6747         if (ret) {
6748                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6749                 return ERR_PTR(ret);
6750         }
6751
6752         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6753                                     blocksize, level);
6754         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6755
6756         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6757                 if (parent == 0)
6758                         parent = ins.objectid;
6759                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6760         } else
6761                 BUG_ON(parent > 0);
6762
6763         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6764                 struct btrfs_delayed_extent_op *extent_op;
6765                 extent_op = btrfs_alloc_delayed_extent_op();
6766                 BUG_ON(!extent_op); /* -ENOMEM */
6767                 if (key)
6768                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6769                 else
6770                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6771                 extent_op->flags_to_set = flags;
6772                 if (skinny_metadata)
6773                         extent_op->update_key = 0;
6774                 else
6775                         extent_op->update_key = 1;
6776                 extent_op->update_flags = 1;
6777                 extent_op->is_data = 0;
6778
6779                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6780                                         ins.objectid,
6781                                         ins.offset, parent, root_objectid,
6782                                         level, BTRFS_ADD_DELAYED_EXTENT,
6783                                         extent_op, 0);
6784                 BUG_ON(ret); /* -ENOMEM */
6785         }
6786         return buf;
6787 }
6788
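     /*
      * state carried through a tree walk by btrfs_drop_snapshot and
      * btrfs_drop_subtree: per-level reference counts and flags, the current
      * walk stage and level, plus the readahead window and locking policy.
      */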
6789 struct walk_control {
6790         u64 refs[BTRFS_MAX_LEVEL];
6791         u64 flags[BTRFS_MAX_LEVEL];
6792         struct btrfs_key update_progress;
6793         int stage;
6794         int level;
6795         int shared_level;
6796         int update_ref;
6797         int keep_locks;
6798         int reada_slot;
6799         int reada_count;
6800         int for_reloc;
6801 };
6802
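     /*
      * the walk runs in two stages.  DROP_REFERENCE drops our reference on
      * every block and frees the ones nobody else references; when a shared
      * block whose backrefs must be updated is found, the walk switches to
      * UPDATE_BACKREF for the subtree below it and then back again.
      */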
6803 #define DROP_REFERENCE  1
6804 #define UPDATE_BACKREF  2
6805
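     /*
      * issue readahead for the lower level blocks referenced by the node we
      * are about to descend into.  the window grows or shrinks with how much
      * of the previous window the walk actually consumed, and blocks the
      * current stage would skip anyway are not read.
      */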
6806 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6807                                      struct btrfs_root *root,
6808                                      struct walk_control *wc,
6809                                      struct btrfs_path *path)
6810 {
6811         u64 bytenr;
6812         u64 generation;
6813         u64 refs;
6814         u64 flags;
6815         u32 nritems;
6816         u32 blocksize;
6817         struct btrfs_key key;
6818         struct extent_buffer *eb;
6819         int ret;
6820         int slot;
6821         int nread = 0;
6822
6823         if (path->slots[wc->level] < wc->reada_slot) {
6824                 wc->reada_count = wc->reada_count * 2 / 3;
6825                 wc->reada_count = max(wc->reada_count, 2);
6826         } else {
6827                 wc->reada_count = wc->reada_count * 3 / 2;
6828                 wc->reada_count = min_t(int, wc->reada_count,
6829                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6830         }
6831
6832         eb = path->nodes[wc->level];
6833         nritems = btrfs_header_nritems(eb);
6834         blocksize = btrfs_level_size(root, wc->level - 1);
6835
6836         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6837                 if (nread >= wc->reada_count)
6838                         break;
6839
6840                 cond_resched();
6841                 bytenr = btrfs_node_blockptr(eb, slot);
6842                 generation = btrfs_node_ptr_generation(eb, slot);
6843
6844                 if (slot == path->slots[wc->level])
6845                         goto reada;
6846
6847                 if (wc->stage == UPDATE_BACKREF &&
6848                     generation <= root->root_key.offset)
6849                         continue;
6850
6851                 /* We don't lock the tree block, it's OK to be racy here */
6852                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
6853                                                wc->level - 1, 1, &refs,
6854                                                &flags);
6855                 /* We don't care about errors in readahead. */
6856                 if (ret < 0)
6857                         continue;
6858                 BUG_ON(refs == 0);
6859
6860                 if (wc->stage == DROP_REFERENCE) {
6861                         if (refs == 1)
6862                                 goto reada;
6863
6864                         if (wc->level == 1 &&
6865                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6866                                 continue;
6867                         if (!wc->update_ref ||
6868                             generation <= root->root_key.offset)
6869                                 continue;
6870                         btrfs_node_key_to_cpu(eb, &key, slot);
6871                         ret = btrfs_comp_cpu_keys(&key,
6872                                                   &wc->update_progress);
6873                         if (ret < 0)
6874                                 continue;
6875                 } else {
6876                         if (wc->level == 1 &&
6877                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6878                                 continue;
6879                 }
6880 reada:
6881                 ret = readahead_tree_block(root, bytenr, blocksize,
6882                                            generation);
6883                 if (ret)
6884                         break;
6885                 nread++;
6886         }
6887         wc->reada_slot = slot;
6888 }
6889
6890 /*
6891  * helper to process tree block while walking down the tree.
6892  *
6893  * when wc->stage == UPDATE_BACKREF, this function updates
6894  * back refs for pointers in the block.
6895  *
6896  * NOTE: return value 1 means we should stop walking down.
6897  */
6898 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6899                                    struct btrfs_root *root,
6900                                    struct btrfs_path *path,
6901                                    struct walk_control *wc, int lookup_info)
6902 {
6903         int level = wc->level;
6904         struct extent_buffer *eb = path->nodes[level];
6905         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6906         int ret;
6907
6908         if (wc->stage == UPDATE_BACKREF &&
6909             btrfs_header_owner(eb) != root->root_key.objectid)
6910                 return 1;
6911
6912         /*
6913          * when the reference count of a tree block is 1, it won't increase
6914          * again. once the full backref flag is set, we never clear it.
6915          */
6916         if (lookup_info &&
6917             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6918              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6919                 BUG_ON(!path->locks[level]);
6920                 ret = btrfs_lookup_extent_info(trans, root,
6921                                                eb->start, level, 1,
6922                                                &wc->refs[level],
6923                                                &wc->flags[level]);
6924                 BUG_ON(ret == -ENOMEM);
6925                 if (ret)
6926                         return ret;
6927                 BUG_ON(wc->refs[level] == 0);
6928         }
6929
6930         if (wc->stage == DROP_REFERENCE) {
6931                 if (wc->refs[level] > 1)
6932                         return 1;
6933
6934                 if (path->locks[level] && !wc->keep_locks) {
6935                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6936                         path->locks[level] = 0;
6937                 }
6938                 return 0;
6939         }
6940
6941         /* wc->stage == UPDATE_BACKREF */
6942         if (!(wc->flags[level] & flag)) {
6943                 BUG_ON(!path->locks[level]);
6944                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6945                 BUG_ON(ret); /* -ENOMEM */
6946                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6947                 BUG_ON(ret); /* -ENOMEM */
6948                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6949                                                   eb->len, flag, 0);
6950                 BUG_ON(ret); /* -ENOMEM */
6951                 wc->flags[level] |= flag;
6952         }
6953
6954         /*
6955          * the block is shared by multiple trees, so it's not good to
6956          * keep the tree lock
6957          */
6958         if (path->locks[level] && level > 0) {
6959                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6960                 path->locks[level] = 0;
6961         }
6962         return 0;
6963 }
6964
6965 /*
6966  * helper to process tree block pointer.
6967  *
6968  * when wc->stage == DROP_REFERENCE, this function checks the
6969  * reference count of the block pointed to. if the block is shared
6970  * and we need to update back refs for the subtree rooted at the
6971  * block, this function changes wc->stage to UPDATE_BACKREF. if the
6972  * block is shared and there is no need to update back refs, this
6973  * function drops the reference to the block.
6975  *
6976  * NOTE: return value 1 means we should stop walking down.
6977  */
6978 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6979                                  struct btrfs_root *root,
6980                                  struct btrfs_path *path,
6981                                  struct walk_control *wc, int *lookup_info)
6982 {
6983         u64 bytenr;
6984         u64 generation;
6985         u64 parent;
6986         u32 blocksize;
6987         struct btrfs_key key;
6988         struct extent_buffer *next;
6989         int level = wc->level;
6990         int reada = 0;
6991         int ret = 0;
6992
6993         generation = btrfs_node_ptr_generation(path->nodes[level],
6994                                                path->slots[level]);
6995         /*
6996          * if the lower level block was created before the snapshot
6997          * was created, we know there is no need to update back refs
6998          * for the subtree
6999          */
7000         if (wc->stage == UPDATE_BACKREF &&
7001             generation <= root->root_key.offset) {
7002                 *lookup_info = 1;
7003                 return 1;
7004         }
7005
7006         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7007         blocksize = btrfs_level_size(root, level - 1);
7008
7009         next = btrfs_find_tree_block(root, bytenr, blocksize);
7010         if (!next) {
7011                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7012                 if (!next)
7013                         return -ENOMEM;
7014                 reada = 1;
7015         }
7016         btrfs_tree_lock(next);
7017         btrfs_set_lock_blocking(next);
7018
7019         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7020                                        &wc->refs[level - 1],
7021                                        &wc->flags[level - 1]);
7022         if (ret < 0) {
7023                 btrfs_tree_unlock(next);
                     /* drop the reference taken by btrfs_find_tree_block above */
                     free_extent_buffer(next);
7024                 return ret;
7025         }
7026
7027         if (unlikely(wc->refs[level - 1] == 0)) {
7028                 btrfs_err(root->fs_info, "Missing references.");
7029                 BUG();
7030         }
7031         *lookup_info = 0;
7032
7033         if (wc->stage == DROP_REFERENCE) {
7034                 if (wc->refs[level - 1] > 1) {
7035                         if (level == 1 &&
7036                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7037                                 goto skip;
7038
7039                         if (!wc->update_ref ||
7040                             generation <= root->root_key.offset)
7041                                 goto skip;
7042
7043                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7044                                               path->slots[level]);
7045                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7046                         if (ret < 0)
7047                                 goto skip;
7048
7049                         wc->stage = UPDATE_BACKREF;
7050                         wc->shared_level = level - 1;
7051                 }
7052         } else {
7053                 if (level == 1 &&
7054                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7055                         goto skip;
7056         }
7057
7058         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7059                 btrfs_tree_unlock(next);
7060                 free_extent_buffer(next);
7061                 next = NULL;
7062                 *lookup_info = 1;
7063         }
7064
7065         if (!next) {
7066                 if (reada && level == 1)
7067                         reada_walk_down(trans, root, wc, path);
7068                 next = read_tree_block(root, bytenr, blocksize, generation);
7069                 if (!next)
7070                         return -EIO;
7071                 btrfs_tree_lock(next);
7072                 btrfs_set_lock_blocking(next);
7073         }
7074
7075         level--;
7076         BUG_ON(level != btrfs_header_level(next));
7077         path->nodes[level] = next;
7078         path->slots[level] = 0;
7079         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7080         wc->level = level;
7081         if (wc->level == 1)
7082                 wc->reada_slot = 0;
7083         return 0;
7084 skip:
7085         wc->refs[level - 1] = 0;
7086         wc->flags[level - 1] = 0;
7087         if (wc->stage == DROP_REFERENCE) {
7088                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7089                         parent = path->nodes[level]->start;
7090                 } else {
7091                         BUG_ON(root->root_key.objectid !=
7092                                btrfs_header_owner(path->nodes[level]));
7093                         parent = 0;
7094                 }
7095
7096                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7097                                 root->root_key.objectid, level - 1, 0, 0);
7098                 BUG_ON(ret); /* -ENOMEM */
7099         }
7100         btrfs_tree_unlock(next);
7101         free_extent_buffer(next);
7102         *lookup_info = 1;
7103         return 1;
7104 }
7105
7106 /*
7107  * helper to process tree block while walking up the tree.
7108  *
7109  * when wc->stage == DROP_REFERENCE, this function drops
7110  * reference count on the block.
7111  *
7112  * when wc->stage == UPDATE_BACKREF, this function changes
7113  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7114  * to UPDATE_BACKREF previously while processing the block.
7115  *
7116  * NOTE: return value 1 means we should stop walking up.
7117  */
7118 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7119                                  struct btrfs_root *root,
7120                                  struct btrfs_path *path,
7121                                  struct walk_control *wc)
7122 {
7123         int ret;
7124         int level = wc->level;
7125         struct extent_buffer *eb = path->nodes[level];
7126         u64 parent = 0;
7127
7128         if (wc->stage == UPDATE_BACKREF) {
7129                 BUG_ON(wc->shared_level < level);
7130                 if (level < wc->shared_level)
7131                         goto out;
7132
7133                 ret = find_next_key(path, level + 1, &wc->update_progress);
7134                 if (ret > 0)
7135                         wc->update_ref = 0;
7136
7137                 wc->stage = DROP_REFERENCE;
7138                 wc->shared_level = -1;
7139                 path->slots[level] = 0;
7140
7141                 /*
7142                  * check reference count again if the block isn't locked.
7143                  * we should start walking down the tree again if reference
7144                  * count is one.
7145                  */
7146                 if (!path->locks[level]) {
7147                         BUG_ON(level == 0);
7148                         btrfs_tree_lock(eb);
7149                         btrfs_set_lock_blocking(eb);
7150                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7151
7152                         ret = btrfs_lookup_extent_info(trans, root,
7153                                                        eb->start, level, 1,
7154                                                        &wc->refs[level],
7155                                                        &wc->flags[level]);
7156                         if (ret < 0) {
7157                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7158                                 path->locks[level] = 0;
7159                                 return ret;
7160                         }
7161                         BUG_ON(wc->refs[level] == 0);
7162                         if (wc->refs[level] == 1) {
7163                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7164                                 path->locks[level] = 0;
7165                                 return 1;
7166                         }
7167                 }
7168         }
7169
7170         /* wc->stage == DROP_REFERENCE */
7171         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7172
7173         if (wc->refs[level] == 1) {
7174                 if (level == 0) {
7175                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7176                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7177                                                     wc->for_reloc);
7178                         else
7179                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7180                                                     wc->for_reloc);
7181                         BUG_ON(ret); /* -ENOMEM */
7182                 }
7183                 /* make the locked-block assertion in clean_tree_block happy */
7184                 if (!path->locks[level] &&
7185                     btrfs_header_generation(eb) == trans->transid) {
7186                         btrfs_tree_lock(eb);
7187                         btrfs_set_lock_blocking(eb);
7188                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7189                 }
7190                 clean_tree_block(trans, root, eb);
7191         }
7192
7193         if (eb == root->node) {
7194                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7195                         parent = eb->start;
7196                 else
7197                         BUG_ON(root->root_key.objectid !=
7198                                btrfs_header_owner(eb));
7199         } else {
7200                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7201                         parent = path->nodes[level + 1]->start;
7202                 else
7203                         BUG_ON(root->root_key.objectid !=
7204                                btrfs_header_owner(path->nodes[level + 1]));
7205         }
7206
7207         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7208 out:
7209         wc->refs[level] = 0;
7210         wc->flags[level] = 0;
7211         return 0;
7212 }
7213
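     /*
      * walk down the tree, processing each node with walk_down_proc and
      * descending through do_walk_down until we reach a leaf or a block the
      * current stage tells us to stop at.
      */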
7214 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7215                                    struct btrfs_root *root,
7216                                    struct btrfs_path *path,
7217                                    struct walk_control *wc)
7218 {
7219         int level = wc->level;
7220         int lookup_info = 1;
7221         int ret;
7222
7223         while (level >= 0) {
7224                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7225                 if (ret > 0)
7226                         break;
7227
7228                 if (level == 0)
7229                         break;
7230
7231                 if (path->slots[level] >=
7232                     btrfs_header_nritems(path->nodes[level]))
7233                         break;
7234
7235                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7236                 if (ret > 0) {
7237                         path->slots[level]++;
7238                         continue;
7239                 } else if (ret < 0)
7240                         return ret;
7241                 level = wc->level;
7242         }
7243         return 0;
7244 }
7245
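     /*
      * walk back up the tree, moving to the next slot when one is left at a
      * level, otherwise finishing the block off with walk_up_proc and
      * releasing it.  returns 1 once the walk has climbed past max_level.
      */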
7246 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7247                                  struct btrfs_root *root,
7248                                  struct btrfs_path *path,
7249                                  struct walk_control *wc, int max_level)
7250 {
7251         int level = wc->level;
7252         int ret;
7253
7254         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7255         while (level < max_level && path->nodes[level]) {
7256                 wc->level = level;
7257                 if (path->slots[level] + 1 <
7258                     btrfs_header_nritems(path->nodes[level])) {
7259                         path->slots[level]++;
7260                         return 0;
7261                 } else {
7262                         ret = walk_up_proc(trans, root, path, wc);
7263                         if (ret > 0)
7264                                 return 0;
7265
7266                         if (path->locks[level]) {
7267                                 btrfs_tree_unlock_rw(path->nodes[level],
7268                                                      path->locks[level]);
7269                                 path->locks[level] = 0;
7270                         }
7271                         free_extent_buffer(path->nodes[level]);
7272                         path->nodes[level] = NULL;
7273                         level++;
7274                 }
7275         }
7276         return 1;
7277 }
7278
7279 /*
7280  * drop a subvolume tree.
7281  *
7282  * this function traverses the tree freeing any blocks that are only
7283  * referenced by the tree.
7284  *
7285  * when a shared tree block is found, this function decreases its
7286  * reference count by one. if update_ref is true, this function
7287  * also makes sure backrefs for the shared block and all lower level
7288  * blocks are properly updated.
7289  *
7290  * If called with for_reloc == 0, may exit early with -EAGAIN
7291  */
7292 int btrfs_drop_snapshot(struct btrfs_root *root,
7293                          struct btrfs_block_rsv *block_rsv, int update_ref,
7294                          int for_reloc)
7295 {
7296         struct btrfs_path *path;
7297         struct btrfs_trans_handle *trans;
7298         struct btrfs_root *tree_root = root->fs_info->tree_root;
7299         struct btrfs_root_item *root_item = &root->root_item;
7300         struct walk_control *wc;
7301         struct btrfs_key key;
7302         int err = 0;
7303         int ret;
7304         int level;
7305
7306         path = btrfs_alloc_path();
7307         if (!path) {
7308                 err = -ENOMEM;
7309                 goto out;
7310         }
7311
7312         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7313         if (!wc) {
7314                 btrfs_free_path(path);
7315                 err = -ENOMEM;
7316                 goto out;
7317         }
7318
7319         trans = btrfs_start_transaction(tree_root, 0);
7320         if (IS_ERR(trans)) {
7321                 err = PTR_ERR(trans);
7322                 goto out_free;
7323         }
7324
7325         if (block_rsv)
7326                 trans->block_rsv = block_rsv;
7327
7328         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7329                 level = btrfs_header_level(root->node);
7330                 path->nodes[level] = btrfs_lock_root_node(root);
7331                 btrfs_set_lock_blocking(path->nodes[level]);
7332                 path->slots[level] = 0;
7333                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7334                 memset(&wc->update_progress, 0,
7335                        sizeof(wc->update_progress));
7336         } else {
7337                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7338                 memcpy(&wc->update_progress, &key,
7339                        sizeof(wc->update_progress));
7340
7341                 level = root_item->drop_level;
7342                 BUG_ON(level == 0);
7343                 path->lowest_level = level;
7344                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7345                 path->lowest_level = 0;
7346                 if (ret < 0) {
7347                         err = ret;
7348                         goto out_end_trans;
7349                 }
7350                 WARN_ON(ret > 0);
7351
7352                 /*
7353                  * unlock our path, this is safe because only this
7354                  * function is allowed to delete this snapshot
7355                  */
7356                 btrfs_unlock_up_safe(path, 0);
7357
7358                 level = btrfs_header_level(root->node);
7359                 while (1) {
7360                         btrfs_tree_lock(path->nodes[level]);
7361                         btrfs_set_lock_blocking(path->nodes[level]);
7362
7363                         ret = btrfs_lookup_extent_info(trans, root,
7364                                                 path->nodes[level]->start,
7365                                                 level, 1, &wc->refs[level],
7366                                                 &wc->flags[level]);
7367                         if (ret < 0) {
7368                                 err = ret;
7369                                 goto out_end_trans;
7370                         }
7371                         BUG_ON(wc->refs[level] == 0);
7372
7373                         if (level == root_item->drop_level)
7374                                 break;
7375
7376                         btrfs_tree_unlock(path->nodes[level]);
7377                         WARN_ON(wc->refs[level] != 1);
7378                         level--;
7379                 }
7380         }
7381
7382         wc->level = level;
7383         wc->shared_level = -1;
7384         wc->stage = DROP_REFERENCE;
7385         wc->update_ref = update_ref;
7386         wc->keep_locks = 0;
7387         wc->for_reloc = for_reloc;
7388         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7389
7390         while (1) {
7391                 if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
7392                         pr_debug("btrfs: drop snapshot early exit\n");
7393                         err = -EAGAIN;
7394                         goto out_end_trans;
7395                 }
7396
7397                 ret = walk_down_tree(trans, root, path, wc);
7398                 if (ret < 0) {
7399                         err = ret;
7400                         break;
7401                 }
7402
7403                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7404                 if (ret < 0) {
7405                         err = ret;
7406                         break;
7407                 }
7408
7409                 if (ret > 0) {
7410                         BUG_ON(wc->stage != DROP_REFERENCE);
7411                         break;
7412                 }
7413
7414                 if (wc->stage == DROP_REFERENCE) {
7415                         level = wc->level;
7416                         btrfs_node_key(path->nodes[level],
7417                                        &root_item->drop_progress,
7418                                        path->slots[level]);
7419                         root_item->drop_level = level;
7420                 }
7421
7422                 BUG_ON(wc->level == 0);
7423                 if (btrfs_should_end_transaction(trans, tree_root)) {
7424                         ret = btrfs_update_root(trans, tree_root,
7425                                                 &root->root_key,
7426                                                 root_item);
7427                         if (ret) {
7428                                 btrfs_abort_transaction(trans, tree_root, ret);
7429                                 err = ret;
7430                                 goto out_end_trans;
7431                         }
7432
7433                         btrfs_end_transaction_throttle(trans, tree_root);
7434                         trans = btrfs_start_transaction(tree_root, 0);
7435                         if (IS_ERR(trans)) {
7436                                 err = PTR_ERR(trans);
7437                                 goto out_free;
7438                         }
7439                         if (block_rsv)
7440                                 trans->block_rsv = block_rsv;
7441                 }
7442         }
7443         btrfs_release_path(path);
7444         if (err)
7445                 goto out_end_trans;
7446
7447         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7448         if (ret) {
7449                 btrfs_abort_transaction(trans, tree_root, ret);
7450                 goto out_end_trans;
7451         }
7452
7453         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7454                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7455                                            NULL, NULL);
7456                 if (ret < 0) {
7457                         btrfs_abort_transaction(trans, tree_root, ret);
7458                         err = ret;
7459                         goto out_end_trans;
7460                 } else if (ret > 0) {
7461                         /* if we fail to delete the orphan item this time
7462                          * around, it'll get picked up the next time.
7463                          *
7464                          * The most common failure here is just -ENOENT.
7465                          */
7466                         btrfs_del_orphan_item(trans, tree_root,
7467                                               root->root_key.objectid);
7468                 }
7469         }
7470
7471         if (root->in_radix) {
7472                 btrfs_free_fs_root(tree_root->fs_info, root);
7473         } else {
7474                 free_extent_buffer(root->node);
7475                 free_extent_buffer(root->commit_root);
7476                 kfree(root);
7477         }
7478 out_end_trans:
7479         btrfs_end_transaction_throttle(trans, tree_root);
7480 out_free:
7481         kfree(wc);
7482         btrfs_free_path(path);
7483 out:
7484         if (err)
7485                 btrfs_std_error(root->fs_info, err);
7486         return err;
7487 }
7488
7489 /*
7490  * drop subtree rooted at tree block 'node'.
7491  *
7492  * NOTE: this function will unlock and release tree block 'node'.
7493  * only used by the relocation code.
7494  */
7495 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7496                         struct btrfs_root *root,
7497                         struct extent_buffer *node,
7498                         struct extent_buffer *parent)
7499 {
7500         struct btrfs_path *path;
7501         struct walk_control *wc;
7502         int level;
7503         int parent_level;
7504         int ret = 0;
7505         int wret;
7506
7507         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7508
7509         path = btrfs_alloc_path();
7510         if (!path)
7511                 return -ENOMEM;
7512
7513         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7514         if (!wc) {
7515                 btrfs_free_path(path);
7516                 return -ENOMEM;
7517         }
7518
7519         btrfs_assert_tree_locked(parent);
7520         parent_level = btrfs_header_level(parent);
7521         extent_buffer_get(parent);
7522         path->nodes[parent_level] = parent;
7523         path->slots[parent_level] = btrfs_header_nritems(parent);
7524
7525         btrfs_assert_tree_locked(node);
7526         level = btrfs_header_level(node);
7527         path->nodes[level] = node;
7528         path->slots[level] = 0;
7529         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7530
7531         wc->refs[parent_level] = 1;
7532         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7533         wc->level = level;
7534         wc->shared_level = -1;
7535         wc->stage = DROP_REFERENCE;
7536         wc->update_ref = 0;
7537         wc->keep_locks = 1;
7538         wc->for_reloc = 1;
7539         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7540
7541         while (1) {
7542                 wret = walk_down_tree(trans, root, path, wc);
7543                 if (wret < 0) {
7544                         ret = wret;
7545                         break;
7546                 }
7547
7548                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7549                 if (wret < 0)
7550                         ret = wret;
7551                 if (wret != 0)
7552                         break;
7553         }
7554
7555         kfree(wc);
7556         btrfs_free_path(path);
7557         return ret;
7558 }
7559
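     /*
      * figure out the profile a relocated chunk should get: honour the
      * restripe target if one is set, otherwise pick something the current
      * device count can hold (eg. mirroring becomes duplication on a single
      * device, and duplication becomes raid1 once there are several).
      */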
7560 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7561 {
7562         u64 num_devices;
7563         u64 stripped;
7564
7565         /*
7566          * if restripe is on for this chunk type, pick the target profile
7567          * and return; otherwise do the usual balance
7568          */
7569         stripped = get_restripe_target(root->fs_info, flags);
7570         if (stripped)
7571                 return extended_to_chunk(stripped);
7572
7573         /*
7574          * we add in the count of missing devices because we want
7575          * to make sure that any RAID levels on a degraded FS
7576          * continue to be honored.
7577          */
7578         num_devices = root->fs_info->fs_devices->rw_devices +
7579                 root->fs_info->fs_devices->missing_devices;
7580
7581         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7582                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7583                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7584
7585         if (num_devices == 1) {
7586                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7587                 stripped = flags & ~stripped;
7588
7589                 /* turn raid0 into single device chunks */
7590                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7591                         return stripped;
7592
7593                 /* turn mirroring into duplication */
7594                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7595                              BTRFS_BLOCK_GROUP_RAID10))
7596                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7597         } else {
7598                 /* they already had raid on here, just return */
7599                 if (flags & stripped)
7600                         return flags;
7601
7602                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7603                 stripped = flags & ~stripped;
7604
7605                 /* switch duplicated blocks with raid1 */
7606                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7607                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7608
7609                 /* this is drive concat, leave it alone */
7610         }
7611
7612         return flags;
7613 }
7614
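     /*
      * try to mark a block group read-only.  unless forced, metadata and
      * system groups must leave min_allocable_bytes of slack in the space
      * info.  returns 0 on success, -ENOSPC if the space can't absorb the
      * group's unused bytes.
      */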
7615 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7616 {
7617         struct btrfs_space_info *sinfo = cache->space_info;
7618         u64 num_bytes;
7619         u64 min_allocable_bytes;
7620         int ret = -ENOSPC;
7621
7623         /*
7624          * We need some metadata space and system metadata space for
7625          * allocating chunks in some corner cases, so keep some slack
7626          * free unless we are forced to set the group readonly.
7627          */
7628         if ((sinfo->flags &
7629              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7630             !force)
7631                 min_allocable_bytes = 1 * 1024 * 1024;
7632         else
7633                 min_allocable_bytes = 0;
7634
7635         spin_lock(&sinfo->lock);
7636         spin_lock(&cache->lock);
7637
7638         if (cache->ro) {
7639                 ret = 0;
7640                 goto out;
7641         }
7642
7643         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7644                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7645
7646         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7647             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7648             min_allocable_bytes <= sinfo->total_bytes) {
7649                 sinfo->bytes_readonly += num_bytes;
7650                 cache->ro = 1;
7651                 ret = 0;
7652         }
7653 out:
7654         spin_unlock(&cache->lock);
7655         spin_unlock(&sinfo->lock);
7656         return ret;
7657 }
7658
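     /*
      * set a block group read-only, force-allocating a fresh chunk first if
      * the profile needs updating or there isn't room to absorb the group.
      */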
7659 int btrfs_set_block_group_ro(struct btrfs_root *root,
7660                              struct btrfs_block_group_cache *cache)
7662 {
7663         struct btrfs_trans_handle *trans;
7664         u64 alloc_flags;
7665         int ret;
7666
7667         BUG_ON(cache->ro);
7668
7669         trans = btrfs_join_transaction(root);
7670         if (IS_ERR(trans))
7671                 return PTR_ERR(trans);
7672
7673         alloc_flags = update_block_group_flags(root, cache->flags);
7674         if (alloc_flags != cache->flags) {
7675                 ret = do_chunk_alloc(trans, root, alloc_flags,
7676                                      CHUNK_ALLOC_FORCE);
7677                 if (ret < 0)
7678                         goto out;
7679         }
7680
7681         ret = set_block_group_ro(cache, 0);
7682         if (!ret)
7683                 goto out;
7684         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7685         ret = do_chunk_alloc(trans, root, alloc_flags,
7686                              CHUNK_ALLOC_FORCE);
7687         if (ret < 0)
7688                 goto out;
7689         ret = set_block_group_ro(cache, 0);
7690 out:
7691         btrfs_end_transaction(trans, root);
7692         return ret;
7693 }
7694
7695 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7696                             struct btrfs_root *root, u64 type)
7697 {
7698         u64 alloc_flags = get_alloc_profile(root, type);
7699         return do_chunk_alloc(trans, root, alloc_flags,
7700                               CHUNK_ALLOC_FORCE);
7701 }
7702
7703 /*
7704  * helper to account the unused space of all the readonly block groups in
7705  * the list. takes mirrors into account.
7706  */
7707 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7708 {
7709         struct btrfs_block_group_cache *block_group;
7710         u64 free_bytes = 0;
7711         int factor;
7712
7713         list_for_each_entry(block_group, groups_list, list) {
7714                 spin_lock(&block_group->lock);
7715
7716                 if (!block_group->ro) {
7717                         spin_unlock(&block_group->lock);
7718                         continue;
7719                 }
7720
7721                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7722                                           BTRFS_BLOCK_GROUP_RAID10 |
7723                                           BTRFS_BLOCK_GROUP_DUP))
7724                         factor = 2;
7725                 else
7726                         factor = 1;
7727
7728                 free_bytes += (block_group->key.offset -
7729                                btrfs_block_group_used(&block_group->item)) *
7730                                factor;
7731
7732                 spin_unlock(&block_group->lock);
7733         }
7734
7735         return free_bytes;
7736 }
7737
7738 /*
7739  * helper to account the unused space of all the readonly block groups in
7740  * the space_info. takes mirrors into account.
7741  */
7742 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7743 {
7744         int i;
7745         u64 free_bytes = 0;
7746
7747         spin_lock(&sinfo->lock);
7748
7749         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7750                 if (!list_empty(&sinfo->block_groups[i]))
7751                         free_bytes += __btrfs_get_ro_block_group_free_space(
7752                                                 &sinfo->block_groups[i]);
7753
7754         spin_unlock(&sinfo->lock);
7755
7756         return free_bytes;
7757 }
7758
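     /*
      * undo set_block_group_ro: return the group's unused bytes to the
      * writable pool.
      */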
7759 void btrfs_set_block_group_rw(struct btrfs_root *root,
7760                               struct btrfs_block_group_cache *cache)
7761 {
7762         struct btrfs_space_info *sinfo = cache->space_info;
7763         u64 num_bytes;
7764
7765         BUG_ON(!cache->ro);
7766
7767         spin_lock(&sinfo->lock);
7768         spin_lock(&cache->lock);
7769         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7770                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7771         sinfo->bytes_readonly -= num_bytes;
7772         cache->ro = 0;
7773         spin_unlock(&cache->lock);
7774         spin_unlock(&sinfo->lock);
7775 }
7776
7777 /*
7778  * checks to see if it's even possible to relocate this block group.
7779  *
7780  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7781  * it's ok to go ahead and try.
7782  */
7783 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7784 {
7785         struct btrfs_block_group_cache *block_group;
7786         struct btrfs_space_info *space_info;
7787         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7788         struct btrfs_device *device;
7789         u64 min_free;
7790         u64 dev_min = 1;
7791         u64 dev_nr = 0;
7792         u64 target;
7793         int index;
7794         int full = 0;
7795         int ret = 0;
7796
7797         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7798
7799         /* odd, couldn't find the block group, leave it alone */
7800         if (!block_group)
7801                 return -1;
7802
7803         min_free = btrfs_block_group_used(&block_group->item);
7804
7805         /* no bytes used, we're good */
7806         if (!min_free)
7807                 goto out;
7808
7809         space_info = block_group->space_info;
7810         spin_lock(&space_info->lock);
7811
7812         full = space_info->full;
7813
7814         /*
7815          * if this is the last block group we have in this space, we can't
7816          * relocate it unless we're able to allocate a new chunk below.
7817          *
7818          * Otherwise, we need to make sure we have room in the space to handle
7819          * all of the extents from this block group.  If we can, we're good
7820          */
7821         if ((space_info->total_bytes != block_group->key.offset) &&
7822             (space_info->bytes_used + space_info->bytes_reserved +
7823              space_info->bytes_pinned + space_info->bytes_readonly +
7824              min_free < space_info->total_bytes)) {
7825                 spin_unlock(&space_info->lock);
7826                 goto out;
7827         }
7828         spin_unlock(&space_info->lock);
7829
7830         /*
7831          * ok we don't have enough space, but maybe we have free space on our
7832          * devices to allocate new chunks for relocation, so loop through our
7833          * alloc devices and guess if we have enough space.  if this block
7834          * group is going to be restriped, run checks against the target
7835          * profile instead of the current one.
7836          */
7837         ret = -1;
7838
7839         /*
7840          * index:
7841          *      0: raid10
7842          *      1: raid1
7843          *      2: dup
7844          *      3: raid0
7845          *      4: single
7846          */
7847         target = get_restripe_target(root->fs_info, block_group->flags);
7848         if (target) {
7849                 index = __get_raid_index(extended_to_chunk(target));
7850         } else {
7851                 /*
7852                  * this is just a balance, so if we were marked as full
7853                  * we know there is no space for a new chunk
7854                  */
7855                 if (full)
7856                         goto out;
7857
7858                 index = get_block_group_index(block_group);
7859         }
7860
7861         if (index == BTRFS_RAID_RAID10) {
7862                 dev_min = 4;
7863                 /* Divide by 2 */
7864                 min_free >>= 1;
7865         } else if (index == BTRFS_RAID_RAID1) {
7866                 dev_min = 2;
7867         } else if (index == BTRFS_RAID_DUP) {
7868                 /* Multiply by 2 */
7869                 min_free <<= 1;
7870         } else if (index == BTRFS_RAID_RAID0) {
7871                 dev_min = fs_devices->rw_devices;
7872                 do_div(min_free, dev_min);
7873         }
7874
7875         mutex_lock(&root->fs_info->chunk_mutex);
7876         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7877                 u64 dev_offset;
7878
7879                 /*
7880                  * check to make sure we can actually find a chunk with enough
7881                  * space to fit our block group in.
7882                  */
7883                 if (device->total_bytes > device->bytes_used + min_free &&
7884                     !device->is_tgtdev_for_dev_replace) {
7885                         ret = find_free_dev_extent(device, min_free,
7886                                                    &dev_offset, NULL);
7887                         if (!ret)
7888                                 dev_nr++;
7889
7890                         if (dev_nr >= dev_min)
7891                                 break;
7892
7893                         ret = -1;
7894                 }
7895         }
7896         mutex_unlock(&root->fs_info->chunk_mutex);
7897 out:
7898         btrfs_put_block_group(block_group);
7899         return ret;
7900 }
7901
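     /*
      * find the first BLOCK_GROUP_ITEM at or past the given key in the
      * extent tree.  returns 0 with the path positioned on the item, > 0 if
      * there is none, < 0 on error.
      */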
7902 static int find_first_block_group(struct btrfs_root *root,
7903                 struct btrfs_path *path, struct btrfs_key *key)
7904 {
7905         int ret = 0;
7906         struct btrfs_key found_key;
7907         struct extent_buffer *leaf;
7908         int slot;
7909
7910         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7911         if (ret < 0)
7912                 goto out;
7913
7914         while (1) {
7915                 slot = path->slots[0];
7916                 leaf = path->nodes[0];
7917                 if (slot >= btrfs_header_nritems(leaf)) {
7918                         ret = btrfs_next_leaf(root, path);
7919                         if (ret == 0)
7920                                 continue;
7921                         if (ret < 0)
7922                                 goto out;
7923                         break;
7924                 }
7925                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7926
7927                 if (found_key.objectid >= key->objectid &&
7928                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7929                         ret = 0;
7930                         goto out;
7931                 }
7932                 path->slots[0]++;
7933         }
7934 out:
7935         return ret;
7936 }
7937
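     /*
      * iput the free space cache inode that each block group may still be
      * holding, so those references don't outlive the mount.
      */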
7938 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7939 {
7940         struct btrfs_block_group_cache *block_group;
7941         u64 last = 0;
7942
7943         while (1) {
7944                 struct inode *inode;
7945
7946                 block_group = btrfs_lookup_first_block_group(info, last);
7947                 while (block_group) {
7948                         spin_lock(&block_group->lock);
7949                         if (block_group->iref)
7950                                 break;
7951                         spin_unlock(&block_group->lock);
7952                         block_group = next_block_group(info->tree_root,
7953                                                        block_group);
7954                 }
7955                 if (!block_group) {
7956                         if (last == 0)
7957                                 break;
7958                         last = 0;
7959                         continue;
7960                 }
7961
7962                 inode = block_group->inode;
7963                 block_group->iref = 0;
7964                 block_group->inode = NULL;
7965                 spin_unlock(&block_group->lock);
7966                 iput(inode);
7967                 last = block_group->key.objectid + block_group->key.offset;
7968                 btrfs_put_block_group(block_group);
7969         }
7970 }
7971
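     /*
      * final teardown of the block group cache: drop the caching controls,
      * pull every block group out of the rbtree and free it, then free the
      * space_info structs themselves.
      */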
7972 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7973 {
7974         struct btrfs_block_group_cache *block_group;
7975         struct btrfs_space_info *space_info;
7976         struct btrfs_caching_control *caching_ctl;
7977         struct rb_node *n;
7978
7979         down_write(&info->extent_commit_sem);
7980         while (!list_empty(&info->caching_block_groups)) {
7981                 caching_ctl = list_entry(info->caching_block_groups.next,
7982                                          struct btrfs_caching_control, list);
7983                 list_del(&caching_ctl->list);
7984                 put_caching_control(caching_ctl);
7985         }
7986         up_write(&info->extent_commit_sem);
7987
7988         spin_lock(&info->block_group_cache_lock);
7989         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7990                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7991                                        cache_node);
7992                 rb_erase(&block_group->cache_node,
7993                          &info->block_group_cache_tree);
7994                 spin_unlock(&info->block_group_cache_lock);
7995
7996                 down_write(&block_group->space_info->groups_sem);
7997                 list_del(&block_group->list);
7998                 up_write(&block_group->space_info->groups_sem);
7999
8000                 if (block_group->cached == BTRFS_CACHE_STARTED)
8001                         wait_block_group_cache_done(block_group);
8002
8003                 /*
8004                  * We haven't cached this block group, which means we could
8005                  * possibly have excluded extents on this block group.
8006                  */
8007                 if (block_group->cached == BTRFS_CACHE_NO)
8008                         free_excluded_extents(info->extent_root, block_group);
8009
8010                 btrfs_remove_free_space_cache(block_group);
8011                 btrfs_put_block_group(block_group);
8012
8013                 spin_lock(&info->block_group_cache_lock);
8014         }
8015         spin_unlock(&info->block_group_cache_lock);
8016
8017         /* now that all the block groups are freed, go through and
8018          * free all the space_info structs.  This is only called during
8019          * the final stages of unmount, and so we know nobody is
8020          * using them.  We call synchronize_rcu() once before we start,
8021          * just to be on the safe side.
8022          */
8023         synchronize_rcu();
8024
8025         release_global_block_rsv(info);
8026
8027         while (!list_empty(&info->space_info)) {
8028                 space_info = list_entry(info->space_info.next,
8029                                         struct btrfs_space_info,
8030                                         list);
8031                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8032                         if (space_info->bytes_pinned > 0 ||
8033                             space_info->bytes_reserved > 0 ||
8034                             space_info->bytes_may_use > 0) {
8035                                 WARN_ON(1);
8036                                 dump_space_info(space_info, 0, 0);
8037                         }
8038                 }
8039                 list_del(&space_info->list);
8040                 kfree(space_info);
8041         }
8042         return 0;
8043 }
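
/*
 * Usage sketch, assuming the unmount path of this era: close_ctree()
 * in disk-io.c tears the fs down after the final commit, roughly:
 *
 *	btrfs_free_block_groups(fs_info);
 *
 * so no allocator can race with the teardown above.
 */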
8044
8045 static void __link_block_group(struct btrfs_space_info *space_info,
8046                                struct btrfs_block_group_cache *cache)
8047 {
8048         int index = get_block_group_index(cache);
8049
8050         down_write(&space_info->groups_sem);
8051         list_add_tail(&cache->list, &space_info->block_groups[index]);
8052         up_write(&space_info->groups_sem);
8053 }
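
/*
 * Note on the list index: get_block_group_index() maps cache->flags
 * onto enum btrfs_raid_types (RAID10 = 0, RAID1 = 1, DUP = 2,
 * RAID0 = 3, single = 4, RAID5 = 5, RAID6 = 6), so each space_info
 * keeps one block group list per profile and allocators only walk
 * the lists matching the profile they want.
 */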
8054
8055 int btrfs_read_block_groups(struct btrfs_root *root)
8056 {
8057         struct btrfs_path *path;
8058         int ret;
8059         struct btrfs_block_group_cache *cache;
8060         struct btrfs_fs_info *info = root->fs_info;
8061         struct btrfs_space_info *space_info;
8062         struct btrfs_key key;
8063         struct btrfs_key found_key;
8064         struct extent_buffer *leaf;
8065         int need_clear = 0;
8066         u64 cache_gen;
8067
8068         root = info->extent_root;
8069         key.objectid = 0;
8070         key.offset = 0;
8071         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8072         path = btrfs_alloc_path();
8073         if (!path)
8074                 return -ENOMEM;
8075         path->reada = 1;
8076
8077         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8078         if (btrfs_test_opt(root, SPACE_CACHE) &&
8079             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8080                 need_clear = 1;
8081         if (btrfs_test_opt(root, CLEAR_CACHE))
8082                 need_clear = 1;
8083
8084         while (1) {
8085                 ret = find_first_block_group(root, path, &key);
8086                 if (ret > 0)
8087                         break;
8088                 if (ret != 0)
8089                         goto error;
8090                 leaf = path->nodes[0];
8091                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8092                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8093                 if (!cache) {
8094                         ret = -ENOMEM;
8095                         goto error;
8096                 }
8097                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8098                                                 GFP_NOFS);
8099                 if (!cache->free_space_ctl) {
8100                         kfree(cache);
8101                         ret = -ENOMEM;
8102                         goto error;
8103                 }
8104
8105                 atomic_set(&cache->count, 1);
8106                 spin_lock_init(&cache->lock);
8107                 cache->fs_info = info;
8108                 INIT_LIST_HEAD(&cache->list);
8109                 INIT_LIST_HEAD(&cache->cluster_list);
8110
8111                 if (need_clear) {
8112                         /*
8113                          * When we mount with an old space cache, we need
8114                          * to set BTRFS_DC_CLEAR and set the dirty flag.
8115                          *
8116                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8117                          *    truncate the old free space cache inode and
8118                          *    set up a new one.
8119                          * b) Setting the 'dirty' flag makes sure that we
8120                          *    flush the new space cache info onto disk.
8121                          */
8122                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8123                         if (btrfs_test_opt(root, SPACE_CACHE))
8124                                 cache->dirty = 1;
8125                 }
8126
8127                 read_extent_buffer(leaf, &cache->item,
8128                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8129                                    sizeof(cache->item));
8130                 memcpy(&cache->key, &found_key, sizeof(found_key));
8131
8132                 key.objectid = found_key.objectid + found_key.offset;
8133                 btrfs_release_path(path);
8134                 cache->flags = btrfs_block_group_flags(&cache->item);
8135                 cache->sectorsize = root->sectorsize;
8136                 cache->full_stripe_len = btrfs_full_stripe_len(root,
8137                                                &root->fs_info->mapping_tree,
8138                                                found_key.objectid);
8139                 btrfs_init_free_space_ctl(cache);
8140
8141                 /*
8142                  * We need to exclude the super stripes now so that the space
8143                  * info has super bytes accounted for, otherwise we'll think
8144                  * we have more space than we actually do.
8145                  */
8146                 ret = exclude_super_stripes(root, cache);
8147                 if (ret) {
8148                         /*
8149                          * We may have excluded something, so call this just in
8150                          * case.
8151                          */
8152                         free_excluded_extents(root, cache);
8153                         kfree(cache->free_space_ctl);
8154                         kfree(cache);
8155                         goto error;
8156                 }
8157
8158                 /*
8159                  * Check for two cases: either we are full, and therefore
8160                  * don't need to bother with the caching work since we
8161                  * won't find any space; or we are empty, and we can just
8162                  * add all the space in and be done with it.  This saves
8163                  * us a lot of time, particularly in the full case.
8164                  */
8165                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8166                         cache->last_byte_to_unpin = (u64)-1;
8167                         cache->cached = BTRFS_CACHE_FINISHED;
8168                         free_excluded_extents(root, cache);
8169                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8170                         cache->last_byte_to_unpin = (u64)-1;
8171                         cache->cached = BTRFS_CACHE_FINISHED;
8172                         add_new_free_space(cache, root->fs_info,
8173                                            found_key.objectid,
8174                                            found_key.objectid +
8175                                            found_key.offset);
8176                         free_excluded_extents(root, cache);
8177                 }
8178
8179                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8180                 if (ret) {
8181                         btrfs_remove_free_space_cache(cache);
8182                         btrfs_put_block_group(cache);
8183                         goto error;
8184                 }
8185
8186                 ret = update_space_info(info, cache->flags, found_key.offset,
8187                                         btrfs_block_group_used(&cache->item),
8188                                         &space_info);
8189                 if (ret) {
8190                         btrfs_remove_free_space_cache(cache);
8191                         spin_lock(&info->block_group_cache_lock);
8192                         rb_erase(&cache->cache_node,
8193                                  &info->block_group_cache_tree);
8194                         spin_unlock(&info->block_group_cache_lock);
8195                         btrfs_put_block_group(cache);
8196                         goto error;
8197                 }
8198
8199                 cache->space_info = space_info;
8200                 spin_lock(&cache->space_info->lock);
8201                 cache->space_info->bytes_readonly += cache->bytes_super;
8202                 spin_unlock(&cache->space_info->lock);
8203
8204                 __link_block_group(space_info, cache);
8205
8206                 set_avail_alloc_bits(root->fs_info, cache->flags);
8207                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8208                         set_block_group_ro(cache, 1);
8209         }
8210
8211         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8212                 if (!(get_alloc_profile(root, space_info->flags) &
8213                       (BTRFS_BLOCK_GROUP_RAID10 |
8214                        BTRFS_BLOCK_GROUP_RAID1 |
8215                        BTRFS_BLOCK_GROUP_RAID5 |
8216                        BTRFS_BLOCK_GROUP_RAID6 |
8217                        BTRFS_BLOCK_GROUP_DUP)))
8218                         continue;
8219                 /*
8220                  * Avoid allocating from un-mirrored block groups (the
8221                  * RAID0/index-3 and single/index-4 lists) if there are
8222                  * mirrored block groups.
8223                  */
8224                 list_for_each_entry(cache, &space_info->block_groups[3], list)
8225                         set_block_group_ro(cache, 1);
8226                 list_for_each_entry(cache, &space_info->block_groups[4], list)
8227                         set_block_group_ro(cache, 1);
8227         }
8228
8229         init_global_block_rsv(info);
8230         ret = 0;
8231 error:
8232         btrfs_free_path(path);
8233         return ret;
8234 }
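
/*
 * Mount-time usage sketch, assuming the open_ctree() caller in
 * disk-io.c of this era:
 *
 *	ret = btrfs_read_block_groups(extent_root);
 *	if (ret) {
 *		printk(KERN_ERR "Failed to read block groups: %d\n", ret);
 *		goto fail_block_groups;
 *	}
 */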
8235
8236 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8237                                        struct btrfs_root *root)
8238 {
8239         struct btrfs_block_group_cache *block_group, *tmp;
8240         struct btrfs_root *extent_root = root->fs_info->extent_root;
8241         struct btrfs_block_group_item item;
8242         struct btrfs_key key;
8243         int ret = 0;
8244
8245         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8246                                  new_bg_list) {
8247                 list_del_init(&block_group->new_bg_list);
8248                 /* If an earlier insertion failed, just drain the list */
8249                 if (ret)
8250                         continue;
8251
8252                 spin_lock(&block_group->lock);
8253                 memcpy(&item, &block_group->item, sizeof(item));
8254                 memcpy(&key, &block_group->key, sizeof(key));
8255                 spin_unlock(&block_group->lock);
8256
8257                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8258                                         sizeof(item));
8259                 if (ret)
8260                         btrfs_abort_transaction(trans, extent_root, ret);
8261         }
8262 }
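
/*
 * Block groups made by btrfs_make_block_group() below are only queued
 * on trans->new_bgs; this function inserts their items from the
 * end-transaction path.  Caller sketch, assuming the transaction.c
 * code of this era:
 *
 *	if (!list_empty(&trans->new_bgs))
 *		btrfs_create_pending_block_groups(trans, root);
 */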
8263
8264 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8265                            struct btrfs_root *root, u64 bytes_used,
8266                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8267                            u64 size)
8268 {
8269         int ret;
8270         struct btrfs_root *extent_root;
8271         struct btrfs_block_group_cache *cache;
8272
8273         extent_root = root->fs_info->extent_root;
8274
8275         root->fs_info->last_trans_log_full_commit = trans->transid;
8276
8277         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8278         if (!cache)
8279                 return -ENOMEM;
8280         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8281                                         GFP_NOFS);
8282         if (!cache->free_space_ctl) {
8283                 kfree(cache);
8284                 return -ENOMEM;
8285         }
8286
8287         cache->key.objectid = chunk_offset;
8288         cache->key.offset = size;
8289         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8290         cache->sectorsize = root->sectorsize;
8291         cache->fs_info = root->fs_info;
8292         cache->full_stripe_len = btrfs_full_stripe_len(root,
8293                                                &root->fs_info->mapping_tree,
8294                                                chunk_offset);
8295
8296         atomic_set(&cache->count, 1);
8297         spin_lock_init(&cache->lock);
8298         INIT_LIST_HEAD(&cache->list);
8299         INIT_LIST_HEAD(&cache->cluster_list);
8300         INIT_LIST_HEAD(&cache->new_bg_list);
8301
8302         btrfs_init_free_space_ctl(cache);
8303
8304         btrfs_set_block_group_used(&cache->item, bytes_used);
8305         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8306         cache->flags = type;
8307         btrfs_set_block_group_flags(&cache->item, type);
8308
8309         cache->last_byte_to_unpin = (u64)-1;
8310         cache->cached = BTRFS_CACHE_FINISHED;
8311         ret = exclude_super_stripes(root, cache);
8312         if (ret) {
8313                 /*
8314                  * We may have excluded something, so call this just in
8315                  * case.
8316                  */
8317                 free_excluded_extents(root, cache);
8318                 kfree(cache->free_space_ctl);
8319                 kfree(cache);
8320                 return ret;
8321         }
8322
8323         add_new_free_space(cache, root->fs_info, chunk_offset,
8324                            chunk_offset + size);
8325
8326         free_excluded_extents(root, cache);
8327
8328         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8329         if (ret) {
8330                 btrfs_remove_free_space_cache(cache);
8331                 btrfs_put_block_group(cache);
8332                 return ret;
8333         }
8334
8335         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8336                                 &cache->space_info);
8337         if (ret) {
8338                 btrfs_remove_free_space_cache(cache);
8339                 spin_lock(&root->fs_info->block_group_cache_lock);
8340                 rb_erase(&cache->cache_node,
8341                          &root->fs_info->block_group_cache_tree);
8342                 spin_unlock(&root->fs_info->block_group_cache_lock);
8343                 btrfs_put_block_group(cache);
8344                 return ret;
8345         }
8346         update_global_block_rsv(root->fs_info);
8347
8348         spin_lock(&cache->space_info->lock);
8349         cache->space_info->bytes_readonly += cache->bytes_super;
8350         spin_unlock(&cache->space_info->lock);
8351
8352         __link_block_group(cache->space_info, cache);
8353
8354         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8355
8356         set_avail_alloc_bits(extent_root->fs_info, type);
8357
8358         return 0;
8359 }
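
/*
 * Caller sketch, assuming the chunk allocator in volumes.c of this
 * era, which creates the in-memory group right after mapping a chunk:
 *
 *	ret = btrfs_make_block_group(trans, extent_root, 0, type,
 *				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *				     start, num_bytes);
 *
 * The BLOCK_GROUP_ITEM itself reaches the extent tree later via
 * btrfs_create_pending_block_groups() above.
 */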
8360
8361 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8362 {
8363         u64 extra_flags = chunk_to_extended(flags) &
8364                                 BTRFS_EXTENDED_PROFILE_MASK;
8365
8366         write_seqlock(&fs_info->profiles_lock);
8367         if (flags & BTRFS_BLOCK_GROUP_DATA)
8368                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8369         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8370                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8371         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8372                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8373         write_sequnlock(&fs_info->profiles_lock);
8374 }
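
/*
 * fs_info->profiles_lock is a seqlock: writers such as
 * clear_avail_alloc_bits() and set_avail_alloc_bits() take
 * write_seqlock(), while readers retry instead of blocking.  A
 * reader sketch in the style of get_alloc_profile():
 *
 *	unsigned int seq;
 *	u64 bits;
 *
 *	do {
 *		seq = read_seqbegin(&fs_info->profiles_lock);
 *		bits = fs_info->avail_data_alloc_bits;
 *	} while (read_seqretry(&fs_info->profiles_lock, seq));
 */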
8375
8376 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8377                              struct btrfs_root *root, u64 group_start)
8378 {
8379         struct btrfs_path *path;
8380         struct btrfs_block_group_cache *block_group;
8381         struct btrfs_free_cluster *cluster;
8382         struct btrfs_root *tree_root = root->fs_info->tree_root;
8383         struct btrfs_key key;
8384         struct inode *inode;
8385         int ret;
8386         int index;
8387         int factor;
8388
8389         root = root->fs_info->extent_root;
8390
8391         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8392         BUG_ON(!block_group);
8393         BUG_ON(!block_group->ro);
8394
8395         /*
8396          * Free the reserved super bytes from this block group before
8397          * removing it.
8398          */
8399         free_excluded_extents(root, block_group);
8400
8401         memcpy(&key, &block_group->key, sizeof(key));
8402         index = get_block_group_index(block_group);
8403         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8404                                   BTRFS_BLOCK_GROUP_RAID1 |
8405                                   BTRFS_BLOCK_GROUP_RAID10))
8406                 factor = 2;
8407         else
8408                 factor = 1;
8409
8410         /* make sure this block group isn't part of an allocation cluster */
8411         cluster = &root->fs_info->data_alloc_cluster;
8412         spin_lock(&cluster->refill_lock);
8413         btrfs_return_cluster_to_free_space(block_group, cluster);
8414         spin_unlock(&cluster->refill_lock);
8415
8416         /*
8417          * make sure this block group isn't part of a metadata
8418          * allocation cluster
8419          */
8420         cluster = &root->fs_info->meta_alloc_cluster;
8421         spin_lock(&cluster->refill_lock);
8422         btrfs_return_cluster_to_free_space(block_group, cluster);
8423         spin_unlock(&cluster->refill_lock);
8424
8425         path = btrfs_alloc_path();
8426         if (!path) {
8427                 ret = -ENOMEM;
8428                 goto out;
8429         }
8430
8431         inode = lookup_free_space_inode(tree_root, block_group, path);
8432         if (!IS_ERR(inode)) {
8433                 ret = btrfs_orphan_add(trans, inode);
8434                 if (ret) {
8435                         btrfs_add_delayed_iput(inode);
8436                         goto out;
8437                 }
8438                 clear_nlink(inode);
8439                 /* One for the block group's ref */
8440                 spin_lock(&block_group->lock);
8441                 if (block_group->iref) {
8442                         block_group->iref = 0;
8443                         block_group->inode = NULL;
8444                         spin_unlock(&block_group->lock);
8445                         iput(inode);
8446                 } else {
8447                         spin_unlock(&block_group->lock);
8448                 }
8449                 /* One for our lookup ref */
8450                 btrfs_add_delayed_iput(inode);
8451         }
8452
8453         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8454         key.offset = block_group->key.objectid;
8455         key.type = 0;
8456
8457         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8458         if (ret < 0)
8459                 goto out;
8460         if (ret > 0)
8461                 btrfs_release_path(path);
8462         if (ret == 0) {
8463                 ret = btrfs_del_item(trans, tree_root, path);
8464                 if (ret)
8465                         goto out;
8466                 btrfs_release_path(path);
8467         }
8468
8469         spin_lock(&root->fs_info->block_group_cache_lock);
8470         rb_erase(&block_group->cache_node,
8471                  &root->fs_info->block_group_cache_tree);
8472
8473         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8474                 root->fs_info->first_logical_byte = (u64)-1;
8475         spin_unlock(&root->fs_info->block_group_cache_lock);
8476
8477         down_write(&block_group->space_info->groups_sem);
8478         /*
8479          * we must use list_del_init so people can check to see if they
8480          * are still on the list after taking the semaphore
8481          */
8482         list_del_init(&block_group->list);
8483         if (list_empty(&block_group->space_info->block_groups[index]))
8484                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8485         up_write(&block_group->space_info->groups_sem);
8486
8487         if (block_group->cached == BTRFS_CACHE_STARTED)
8488                 wait_block_group_cache_done(block_group);
8489
8490         btrfs_remove_free_space_cache(block_group);
8491
8492         spin_lock(&block_group->space_info->lock);
8493         block_group->space_info->total_bytes -= block_group->key.offset;
8494         block_group->space_info->bytes_readonly -= block_group->key.offset;
8495         block_group->space_info->disk_total -= block_group->key.offset * factor;
8496         spin_unlock(&block_group->space_info->lock);
8497
8498         memcpy(&key, &block_group->key, sizeof(key));
8499
8500         btrfs_clear_space_info_full(root->fs_info);
8501
8502         btrfs_put_block_group(block_group); /* our lookup reference */
8503         btrfs_put_block_group(block_group); /* the rbtree's reference */
8504
8505         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8506         if (ret > 0)
8507                 ret = -EIO;
8508         if (ret < 0)
8509                 goto out;
8510
8511         ret = btrfs_del_item(trans, root, path);
8512 out:
8513         btrfs_free_path(path);
8514         return ret;
8515 }
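
/*
 * Caller sketch, assuming btrfs_relocate_chunk() in volumes.c of this
 * era: a block group is removed only after relocation has emptied it
 * and set it read-only:
 *
 *	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
 *
 * which is what makes the BUG_ON(!block_group->ro) above safe.
 */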
8516
8517 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8518 {
8519         struct btrfs_space_info *space_info;
8520         struct btrfs_super_block *disk_super;
8521         u64 features;
8522         u64 flags;
8523         int mixed = 0;
8524         int ret;
8525
8526         disk_super = fs_info->super_copy;
8527         if (!btrfs_super_root(disk_super))
8528                 return 1;
8529
8530         features = btrfs_super_incompat_flags(disk_super);
8531         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8532                 mixed = 1;
8533
8534         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8535         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8536         if (ret)
8537                 goto out;
8538
8539         if (mixed) {
8540                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8541                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8542         } else {
8543                 flags = BTRFS_BLOCK_GROUP_METADATA;
8544                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8545                 if (ret)
8546                         goto out;
8547
8548                 flags = BTRFS_BLOCK_GROUP_DATA;
8549                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8550         }
8551 out:
8552         return ret;
8553 }
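
/*
 * Usage sketch, assuming the open_ctree() caller of this era: this
 * runs before btrfs_read_block_groups() so the basic space_info
 * structs exist even on a filesystem whose block groups were all
 * removed by balance:
 *
 *	ret = btrfs_init_space_info(fs_info);
 *	if (ret)
 *		goto fail_block_groups;
 */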
8554
8555 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8556 {
8557         return unpin_extent_range(root, start, end);
8558 }
8559
8560 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8561                                u64 num_bytes, u64 *actual_bytes)
8562 {
8563         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8564 }
8565
8566 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8567 {
8568         struct btrfs_fs_info *fs_info = root->fs_info;
8569         struct btrfs_block_group_cache *cache = NULL;
8570         u64 group_trimmed;
8571         u64 start;
8572         u64 end;
8573         u64 trimmed = 0;
8574         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8575         int ret = 0;
8576
8577         /*
8578          * Try to trim all FS space; block groups may start at a non-zero offset.
8579          */
8580         if (range->len == total_bytes)
8581                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8582         else
8583                 cache = btrfs_lookup_block_group(fs_info, range->start);
8584
8585         while (cache) {
8586                 if (cache->key.objectid >= (range->start + range->len)) {
8587                         btrfs_put_block_group(cache);
8588                         break;
8589                 }
8590
8591                 start = max(range->start, cache->key.objectid);
8592                 end = min(range->start + range->len,
8593                                 cache->key.objectid + cache->key.offset);
8594
8595                 if (end - start >= range->minlen) {
8596                         if (!block_group_cache_done(cache)) {
8597                                 ret = cache_block_group(cache, 0);
8598                                 if (!ret)
8599                                         wait_block_group_cache_done(cache);
8600                         }
8601                         ret = btrfs_trim_block_group(cache,
8602                                                      &group_trimmed,
8603                                                      start,
8604                                                      end,
8605                                                      range->minlen);
8606
8607                         trimmed += group_trimmed;
8608                         if (ret) {
8609                                 btrfs_put_block_group(cache);
8610                                 break;
8611                         }
8612                 }
8613
8614                 cache = next_block_group(fs_info->tree_root, cache);
8615         }
8616
8617         range->len = trimmed;
8618         return ret;
8619 }
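
/*
 * Usage sketch, assuming the FITRIM ioctl handler in ioctl.c of this
 * era (minlen clamping and permission checks omitted):
 *
 *	struct fstrim_range range;
 *
 *	if (copy_from_user(&range, arg, sizeof(range)))
 *		return -EFAULT;
 *	ret = btrfs_trim_fs(fs_info->tree_root, &range);
 *	if (ret < 0)
 *		return ret;
 *	if (copy_to_user(arg, &range, sizeof(range)))
 *		return -EFAULT;
 *	return 0;
 */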