/*
 * fs/btrfs/extent-tree.c (linux-imx.git)
 * Btrfs: fix cluster alignment for mount -o ssd
 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

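/*
 * Minimal usage sketches for the CHUNK_ALLOC_* and RESERVE_* flags above
 * (hypothetical helpers, not part of the original file): callers normally
 * start with CHUNK_ALLOC_NO_FORCE and only escalate to CHUNK_ALLOC_FORCE
 * once a reservation has actually failed.
 */
static inline int __maybe_unused example_alloc_data_chunk(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *extent_root,
					int enospc_retry)
{
	int force = enospc_retry ? CHUNK_ALLOC_FORCE : CHUNK_ALLOC_NO_FORCE;

	/*
	 * do_chunk_alloc() decides, based on 'force', whether a new chunk
	 * is really needed before touching unallocated device space.
	 */
	return do_chunk_alloc(trans, extent_root, BTRFS_BLOCK_GROUP_DATA,
			      force);
}

static inline int __maybe_unused example_reserve_extent(
					struct btrfs_block_group_cache *cache,
					u64 num_bytes)
{
	/* RESERVE_ALLOC also bumps bytes_may_use for ENOSPC accounting. */
	return btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
}
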
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret); /* -ENOMEM */
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret); /* -ENOMEM */

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret); /* -ENOMEM */
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}
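
/*
 * Note on the refcounting contract above: every successful
 * get_caching_control() must be paired with a put_caching_control();
 * the final put is what frees the control structure.
 */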

/*
 * This is only called by cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, since their free space will only be released once the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
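
/*
 * Worked example of the gap-filling above: caching a block group spanning
 * [0, 100) while [10, 20] is still pinned adds [0, 10) and [21, 100) as
 * free space and skips the pinned middle until the transaction commits.
 */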

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and another thread then starts
	 * a transaction commit that tries to do an allocation while the
	 * first thread is still loading the space cache info.  The previous
	 * loop should have kept us from choosing this block group, but if
	 * we've moved to the state where we will wait on caching block
	 * groups we need to first check if we're doing a fast load here, so
	 * we can wait for it to finish; otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.  Also if we are currently trying to
	 * allocate blocks for the tree root we can't do the fast caching since
	 * we likely hold important locks.
	 */
	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
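
/*
 * Sketch of a hypothetical caller (not from this file): load_cache_only == 1
 * tries only the fast free-space-cache load, while 0 falls back to queueing
 * caching_thread() to walk the extent tree in the background.  The body
 * above never dereferences trans, so passing NULL is safe in this version.
 */
static int __maybe_unused example_start_caching(
				struct btrfs_block_group_cache *cache,
				struct btrfs_root *root)
{
	return cache_block_group(cache, NULL, root, 0);
}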

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
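
/*
 * Usage sketch for the lookups above (hypothetical helper): both return
 * the group with a reference held, so each successful lookup must be
 * paired with btrfs_put_block_group().
 */
static void __maybe_unused example_use_block_group(struct btrfs_fs_info *info,
						   u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(info, bytenr);
	if (!bg)
		return;
	/* ... use bg here ... */
	btrfs_put_block_group(bg);
}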

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
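
/*
 * Worked example for the factor logic above: div_factor(size, 9) is 90% of
 * the group size, so the first pass only accepts a metadata group whose
 * used + pinned + reserved total is below 90% full; the full_search retry
 * relaxes this to factor == 10 (any group with free room) and also
 * considers read-only groups.
 */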

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
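
/*
 * Usage sketch (hypothetical helper): btrfs_search_slot() returns 0 on an
 * exact key match and > 0 when the key was not found, so callers treat a
 * return of 0 as "the extent item exists".
 */
static inline bool __maybe_unused example_extent_exists(struct btrfs_root *root,
							u64 start, u64 len)
{
	return btrfs_lookup_extent(root, start, len) == 0;
}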

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags will be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
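
/*
 * Sketch (hypothetical helper): with trans == NULL the lookup above
 * searches the commit root without locking and skips the delayed-ref
 * adjustment, returning the last committed reference count.
 */
static int __maybe_unused example_committed_refs(struct btrfs_root *root,
						 u64 bytenr, u64 num_bytes,
						 u64 *refs)
{
	return btrfs_lookup_extent_info(NULL, root, bytenr, num_bytes,
					refs, NULL);
}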

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used. Their major
 * shortcoming is the overhead: every time a tree block gets COWed, we have
 * to update the back refs entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
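
/*
 * Sketch of how the hash above becomes a b-tree key (hypothetical helper;
 * lookup_extent_data_ref() below uses the same pattern): implicit data
 * back refs are addressed by (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 * hash of root/objectid/offset).
 */
static void __maybe_unused example_data_ref_key(struct btrfs_key *key,
						u64 bytenr, u64 root_objectid,
						u64 ino, u64 file_offset)
{
	key->objectid = bytenr;
	key->type = BTRFS_EXTENT_DATA_REF_KEY;
	key->offset = hash_extent_data_ref(root_objectid, ino, file_offset);
}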

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
1321
1322 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1323                                           struct btrfs_root *root,
1324                                           struct btrfs_path *path,
1325                                           u64 bytenr, u64 parent,
1326                                           u64 root_objectid)
1327 {
1328         struct btrfs_key key;
1329         int ret;
1330
1331         key.objectid = bytenr;
1332         if (parent) {
1333                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1334                 key.offset = parent;
1335         } else {
1336                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1337                 key.offset = root_objectid;
1338         }
1339
1340         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1341         if (ret > 0)
1342                 ret = -ENOENT;
1343 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1344         if (ret == -ENOENT && parent) {
1345                 btrfs_release_path(path);
1346                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1347                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1348                 if (ret > 0)
1349                         ret = -ENOENT;
1350         }
1351 #endif
1352         return ret;
1353 }
1354
1355 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1356                                           struct btrfs_root *root,
1357                                           struct btrfs_path *path,
1358                                           u64 bytenr, u64 parent,
1359                                           u64 root_objectid)
1360 {
1361         struct btrfs_key key;
1362         int ret;
1363
1364         key.objectid = bytenr;
1365         if (parent) {
1366                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1367                 key.offset = parent;
1368         } else {
1369                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1370                 key.offset = root_objectid;
1371         }
1372
1373         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1374         btrfs_release_path(path);
1375         return ret;
1376 }
1377
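     /*
      * Map (parent, owner) to the backref key type.  Owner values below
      * BTRFS_FIRST_FREE_OBJECTID mean the extent is a tree block; anything
      * else is file data.  A non-zero parent selects the shared (full)
      * backref variants, which are keyed on the parent block's bytenr.
      */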
1378 static inline int extent_ref_type(u64 parent, u64 owner)
1379 {
1380         int type;
1381         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1382                 if (parent > 0)
1383                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1384                 else
1385                         type = BTRFS_TREE_BLOCK_REF_KEY;
1386         } else {
1387                 if (parent > 0)
1388                         type = BTRFS_SHARED_DATA_REF_KEY;
1389                 else
1390                         type = BTRFS_EXTENT_DATA_REF_KEY;
1391         }
1392         return type;
1393 }
1394
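     /*
      * Find the key that follows the current path position: walk from
      * @level towards the root until a node has a slot to the right of
      * the current one, copy that key into @key and return 0.  Returns 1
      * if the path already points at the last item in the tree.
      */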
1395 static int find_next_key(struct btrfs_path *path, int level,
1396                          struct btrfs_key *key)
1398 {
1399         for (; level < BTRFS_MAX_LEVEL; level++) {
1400                 if (!path->nodes[level])
1401                         break;
1402                 if (path->slots[level] + 1 >=
1403                     btrfs_header_nritems(path->nodes[level]))
1404                         continue;
1405                 if (level == 0)
1406                         btrfs_item_key_to_cpu(path->nodes[level], key,
1407                                               path->slots[level] + 1);
1408                 else
1409                         btrfs_node_key_to_cpu(path->nodes[level], key,
1410                                               path->slots[level] + 1);
1411                 return 0;
1412         }
1413         return 1;
1414 }
1415
1416 /*
1417  * Look for an inline back ref.  If the back ref is found, *ref_ret is
1418  * set to the address of the inline back ref and 0 is returned.
1419  *
1420  * If the back ref isn't found, *ref_ret is set to the address where it
1421  * should be inserted and -ENOENT is returned.
1422  *
1423  * If insert is true and there are too many inline back refs, the path
1424  * points to the extent item and -EAGAIN is returned.
1425  *
1426  * NOTE: inline back refs are ordered in the same way that back ref
1427  *       items in the tree are ordered.
1428  */
1429 static noinline_for_stack
1430 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1431                                  struct btrfs_root *root,
1432                                  struct btrfs_path *path,
1433                                  struct btrfs_extent_inline_ref **ref_ret,
1434                                  u64 bytenr, u64 num_bytes,
1435                                  u64 parent, u64 root_objectid,
1436                                  u64 owner, u64 offset, int insert)
1437 {
1438         struct btrfs_key key;
1439         struct extent_buffer *leaf;
1440         struct btrfs_extent_item *ei;
1441         struct btrfs_extent_inline_ref *iref;
1442         u64 flags;
1443         u64 item_size;
1444         unsigned long ptr;
1445         unsigned long end;
1446         int extra_size;
1447         int type;
1448         int want;
1449         int ret;
1450         int err = 0;
1451
1452         key.objectid = bytenr;
1453         key.type = BTRFS_EXTENT_ITEM_KEY;
1454         key.offset = num_bytes;
1455
1456         want = extent_ref_type(parent, owner);
1457         if (insert) {
1458                 extra_size = btrfs_extent_inline_ref_size(want);
1459                 path->keep_locks = 1;
1460         } else
1461                 extra_size = -1;
1462         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1463         if (ret < 0) {
1464                 err = ret;
1465                 goto out;
1466         }
1467         if (ret && !insert) {
1468                 err = -ENOENT;
1469                 goto out;
1470         }
1471         BUG_ON(ret); /* Corruption */
1472
1473         leaf = path->nodes[0];
1474         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1475 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1476         if (item_size < sizeof(*ei)) {
1477                 if (!insert) {
1478                         err = -ENOENT;
1479                         goto out;
1480                 }
1481                 ret = convert_extent_item_v0(trans, root, path, owner,
1482                                              extra_size);
1483                 if (ret < 0) {
1484                         err = ret;
1485                         goto out;
1486                 }
1487                 leaf = path->nodes[0];
1488                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1489         }
1490 #endif
1491         BUG_ON(item_size < sizeof(*ei));
1492
1493         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1494         flags = btrfs_extent_flags(leaf, ei);
1495
1496         ptr = (unsigned long)(ei + 1);
1497         end = (unsigned long)ei + item_size;
1498
1499         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1500                 ptr += sizeof(struct btrfs_tree_block_info);
1501                 BUG_ON(ptr > end);
1502         } else {
1503                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1504         }
1505
1506         err = -ENOENT;
1507         while (1) {
1508                 if (ptr >= end) {
1509                         WARN_ON(ptr > end);
1510                         break;
1511                 }
1512                 iref = (struct btrfs_extent_inline_ref *)ptr;
1513                 type = btrfs_extent_inline_ref_type(leaf, iref);
1514                 if (want < type)
1515                         break;
1516                 if (want > type) {
1517                         ptr += btrfs_extent_inline_ref_size(type);
1518                         continue;
1519                 }
1520
1521                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1522                         struct btrfs_extent_data_ref *dref;
1523                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1524                         if (match_extent_data_ref(leaf, dref, root_objectid,
1525                                                   owner, offset)) {
1526                                 err = 0;
1527                                 break;
1528                         }
1529                         if (hash_extent_data_ref_item(leaf, dref) <
1530                             hash_extent_data_ref(root_objectid, owner, offset))
1531                                 break;
1532                 } else {
1533                         u64 ref_offset;
1534                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1535                         if (parent > 0) {
1536                                 if (parent == ref_offset) {
1537                                         err = 0;
1538                                         break;
1539                                 }
1540                                 if (ref_offset < parent)
1541                                         break;
1542                         } else {
1543                                 if (root_objectid == ref_offset) {
1544                                         err = 0;
1545                                         break;
1546                                 }
1547                                 if (ref_offset < root_objectid)
1548                                         break;
1549                         }
1550                 }
1551                 ptr += btrfs_extent_inline_ref_size(type);
1552         }
1553         if (err == -ENOENT && insert) {
1554                 if (item_size + extra_size >=
1555                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1556                         err = -EAGAIN;
1557                         goto out;
1558                 }
1559                 /*
1560                  * To add a new inline back ref, we have to make sure
1561                  * there is no corresponding back ref item.
1562                  * For simplicity, we just do not add a new inline back
1563                  * ref if there is any kind of item for this block.
1564                  */
1565                 if (find_next_key(path, 0, &key) == 0 &&
1566                     key.objectid == bytenr &&
1567                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1568                         err = -EAGAIN;
1569                         goto out;
1570                 }
1571         }
1572         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1573 out:
1574         if (insert) {
1575                 path->keep_locks = 0;
1576                 btrfs_unlock_up_safe(path, 1);
1577         }
1578         return err;
1579 }
1580
1581 /*
1582  * helper to add a new inline back ref
1583  */
1584 static noinline_for_stack
1585 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1586                                  struct btrfs_root *root,
1587                                  struct btrfs_path *path,
1588                                  struct btrfs_extent_inline_ref *iref,
1589                                  u64 parent, u64 root_objectid,
1590                                  u64 owner, u64 offset, int refs_to_add,
1591                                  struct btrfs_delayed_extent_op *extent_op)
1592 {
1593         struct extent_buffer *leaf;
1594         struct btrfs_extent_item *ei;
1595         unsigned long ptr;
1596         unsigned long end;
1597         unsigned long item_offset;
1598         u64 refs;
1599         int size;
1600         int type;
1601
1602         leaf = path->nodes[0];
1603         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1604         item_offset = (unsigned long)iref - (unsigned long)ei;
1605
1606         type = extent_ref_type(parent, owner);
1607         size = btrfs_extent_inline_ref_size(type);
1608
1609         btrfs_extend_item(trans, root, path, size);
1610
1611         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1612         refs = btrfs_extent_refs(leaf, ei);
1613         refs += refs_to_add;
1614         btrfs_set_extent_refs(leaf, ei, refs);
1615         if (extent_op)
1616                 __run_delayed_extent_op(extent_op, leaf, ei);
1617
1618         ptr = (unsigned long)ei + item_offset;
1619         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1620         if (ptr < end - size)
1621                 memmove_extent_buffer(leaf, ptr + size, ptr,
1622                                       end - size - ptr);
1623
1624         iref = (struct btrfs_extent_inline_ref *)ptr;
1625         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1626         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1627                 struct btrfs_extent_data_ref *dref;
1628                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1629                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1630                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1631                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1632                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1633         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1634                 struct btrfs_shared_data_ref *sref;
1635                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1636                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1637                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1638         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1639                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1640         } else {
1641                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1642         }
1643         btrfs_mark_buffer_dirty(leaf);
1644 }
1645
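     /*
      * Look up any backref for the extent: try the inline variant first,
      * then fall back to the keyed backref items.  *ref_ret is only set
      * for inline refs; a keyed ref found by the fallback leaves *ref_ret
      * NULL with the path pointing at the matching item.
      */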
1646 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1647                                  struct btrfs_root *root,
1648                                  struct btrfs_path *path,
1649                                  struct btrfs_extent_inline_ref **ref_ret,
1650                                  u64 bytenr, u64 num_bytes, u64 parent,
1651                                  u64 root_objectid, u64 owner, u64 offset)
1652 {
1653         int ret;
1654
1655         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1656                                            bytenr, num_bytes, parent,
1657                                            root_objectid, owner, offset, 0);
1658         if (ret != -ENOENT)
1659                 return ret;
1660
1661         btrfs_release_path(path);
1662         *ref_ret = NULL;
1663
1664         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1665                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1666                                             root_objectid);
1667         } else {
1668                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1669                                              root_objectid, owner, offset);
1670         }
1671         return ret;
1672 }
1673
1674 /*
1675  * helper to update or remove an inline back ref
1676  */
1677 static noinline_for_stack
1678 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1679                                   struct btrfs_root *root,
1680                                   struct btrfs_path *path,
1681                                   struct btrfs_extent_inline_ref *iref,
1682                                   int refs_to_mod,
1683                                   struct btrfs_delayed_extent_op *extent_op)
1684 {
1685         struct extent_buffer *leaf;
1686         struct btrfs_extent_item *ei;
1687         struct btrfs_extent_data_ref *dref = NULL;
1688         struct btrfs_shared_data_ref *sref = NULL;
1689         unsigned long ptr;
1690         unsigned long end;
1691         u32 item_size;
1692         int size;
1693         int type;
1694         u64 refs;
1695
1696         leaf = path->nodes[0];
1697         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1698         refs = btrfs_extent_refs(leaf, ei);
1699         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1700         refs += refs_to_mod;
1701         btrfs_set_extent_refs(leaf, ei, refs);
1702         if (extent_op)
1703                 __run_delayed_extent_op(extent_op, leaf, ei);
1704
1705         type = btrfs_extent_inline_ref_type(leaf, iref);
1706
1707         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1708                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1709                 refs = btrfs_extent_data_ref_count(leaf, dref);
1710         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1712                 refs = btrfs_shared_data_ref_count(leaf, sref);
1713         } else {
1714                 refs = 1;
1715                 BUG_ON(refs_to_mod != -1);
1716         }
1717
1718         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1719         refs += refs_to_mod;
1720
1721         if (refs > 0) {
1722                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1723                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1724                 else
1725                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1726         } else {
1727                 size =  btrfs_extent_inline_ref_size(type);
1728                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1729                 ptr = (unsigned long)iref;
1730                 end = (unsigned long)ei + item_size;
1731                 if (ptr + size < end)
1732                         memmove_extent_buffer(leaf, ptr, ptr + size,
1733                                               end - ptr - size);
1734                 item_size -= size;
1735                 btrfs_truncate_item(trans, root, path, item_size, 1);
1736         }
1737         btrfs_mark_buffer_dirty(leaf);
1738 }
1739
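     /*
      * Add @refs_to_add references via an inline backref: bump the count
      * of a matching inline ref in place, or insert a new one at the slot
      * the lookup picked.  If the extent item has no room left, -EAGAIN
      * from the lookup is passed through so the caller can fall back to a
      * keyed backref item.
      */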
1740 static noinline_for_stack
1741 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1742                                  struct btrfs_root *root,
1743                                  struct btrfs_path *path,
1744                                  u64 bytenr, u64 num_bytes, u64 parent,
1745                                  u64 root_objectid, u64 owner,
1746                                  u64 offset, int refs_to_add,
1747                                  struct btrfs_delayed_extent_op *extent_op)
1748 {
1749         struct btrfs_extent_inline_ref *iref;
1750         int ret;
1751
1752         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1753                                            bytenr, num_bytes, parent,
1754                                            root_objectid, owner, offset, 1);
1755         if (ret == 0) {
1756                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1757                 update_inline_extent_backref(trans, root, path, iref,
1758                                              refs_to_add, extent_op);
1759         } else if (ret == -ENOENT) {
1760                 setup_inline_extent_backref(trans, root, path, iref, parent,
1761                                             root_objectid, owner, offset,
1762                                             refs_to_add, extent_op);
1763                 ret = 0;
1764         }
1765         return ret;
1766 }
1767
1768 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1769                                  struct btrfs_root *root,
1770                                  struct btrfs_path *path,
1771                                  u64 bytenr, u64 parent, u64 root_objectid,
1772                                  u64 owner, u64 offset, int refs_to_add)
1773 {
1774         int ret;
1775         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1776                 BUG_ON(refs_to_add != 1);
1777                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1778                                             parent, root_objectid);
1779         } else {
1780                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1781                                              parent, root_objectid,
1782                                              owner, offset, refs_to_add);
1783         }
1784         return ret;
1785 }
1786
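     /*
      * Drop @refs_to_drop references: shrink or update the inline backref
      * when @iref is set, otherwise decrement or delete the keyed backref
      * item the path points at.  Tree block refs always drop exactly one.
      */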
1787 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1788                                  struct btrfs_root *root,
1789                                  struct btrfs_path *path,
1790                                  struct btrfs_extent_inline_ref *iref,
1791                                  int refs_to_drop, int is_data)
1792 {
1793         int ret = 0;
1794
1795         BUG_ON(!is_data && refs_to_drop != 1);
1796         if (iref) {
1797                 update_inline_extent_backref(trans, root, path, iref,
1798                                              -refs_to_drop, NULL);
1799         } else if (is_data) {
1800                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1801         } else {
1802                 ret = btrfs_del_item(trans, root, path);
1803         }
1804         return ret;
1805 }
1806
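     /*
      * blkdev_issue_discard() works in 512-byte sectors, hence the shift
      * by 9: a discard starting at 1MiB, for example, begins at sector
      * 2048.
      */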
1807 static int btrfs_issue_discard(struct block_device *bdev,
1808                                 u64 start, u64 len)
1809 {
1810         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1811 }
1812
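     /*
      * Discard a logical extent: map it to the physical stripes on each
      * device and issue a discard for every stripe whose device supports
      * it.  On return, *actual_bytes (if non-NULL) holds how many bytes
      * were really discarded; -EOPNOTSUPP is not treated as an error.
      */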
1813 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1814                                 u64 num_bytes, u64 *actual_bytes)
1815 {
1816         int ret;
1817         u64 discarded_bytes = 0;
1818         struct btrfs_bio *bbio = NULL;
1819
1821         /* Tell the block device(s) that the sectors can be discarded */
1822         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1823                               bytenr, &num_bytes, &bbio, 0);
1824         /* Error condition is -ENOMEM */
1825         if (!ret) {
1826                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1827                 int i;
1828
1830                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1831                         if (!stripe->dev->can_discard)
1832                                 continue;
1833
1834                         ret = btrfs_issue_discard(stripe->dev->bdev,
1835                                                   stripe->physical,
1836                                                   stripe->length);
1837                         if (!ret)
1838                                 discarded_bytes += stripe->length;
1839                         else if (ret != -EOPNOTSUPP)
1840                                 break; /* Logic errors or -ENOMEM; -EIO is not expected here */
1841
1842                         /*
1843                          * If we get back EOPNOTSUPP for some reason,
1844                          * ignore the return value so we don't confuse
1845                          * callers of discard_extent.
1846                          */
1847                         ret = 0;
1848                 }
1849                 kfree(bbio);
1850         }
1851
1852         if (actual_bytes)
1853                 *actual_bytes = discarded_bytes;
1854
1856         if (ret == -EOPNOTSUPP)
1857                 ret = 0;
1858         return ret;
1859 }
1860
1861 /* Can return -ENOMEM */
1862 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1863                          struct btrfs_root *root,
1864                          u64 bytenr, u64 num_bytes, u64 parent,
1865                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1866 {
1867         int ret;
1868         struct btrfs_fs_info *fs_info = root->fs_info;
1869
1870         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1871                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1872
1873         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1874                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1875                                         num_bytes,
1876                                         parent, root_objectid, (int)owner,
1877                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1878         } else {
1879                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1880                                         num_bytes,
1881                                         parent, root_objectid, owner, offset,
1882                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1883         }
1884         return ret;
1885 }
1886
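     /*
      * Add @refs_to_add references to an existing extent.  The fast path
      * updates or inserts an inline backref; if the extent item is full
      * (-EAGAIN), bump the ref count on the extent item itself and insert
      * a separate keyed backref item instead.
      */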
1887 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1888                                   struct btrfs_root *root,
1889                                   u64 bytenr, u64 num_bytes,
1890                                   u64 parent, u64 root_objectid,
1891                                   u64 owner, u64 offset, int refs_to_add,
1892                                   struct btrfs_delayed_extent_op *extent_op)
1893 {
1894         struct btrfs_path *path;
1895         struct extent_buffer *leaf;
1896         struct btrfs_extent_item *item;
1897         u64 refs;
1898         int ret;
1899         int err = 0;
1900
1901         path = btrfs_alloc_path();
1902         if (!path)
1903                 return -ENOMEM;
1904
1905         path->reada = 1;
1906         path->leave_spinning = 1;
1907         /* this will set up the path even if it fails to insert the back ref */
1908         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1909                                            path, bytenr, num_bytes, parent,
1910                                            root_objectid, owner, offset,
1911                                            refs_to_add, extent_op);
1912         if (ret == 0)
1913                 goto out;
1914
1915         if (ret != -EAGAIN) {
1916                 err = ret;
1917                 goto out;
1918         }
1919
1920         leaf = path->nodes[0];
1921         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1922         refs = btrfs_extent_refs(leaf, item);
1923         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1924         if (extent_op)
1925                 __run_delayed_extent_op(extent_op, leaf, item);
1926
1927         btrfs_mark_buffer_dirty(leaf);
1928         btrfs_release_path(path);
1929
1930         path->reada = 1;
1931         path->leave_spinning = 1;
1932
1933         /* now insert the actual backref */
1934         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1935                                     path, bytenr, parent, root_objectid,
1936                                     owner, offset, refs_to_add);
1937         if (ret)
1938                 btrfs_abort_transaction(trans, root, ret);
1939 out:
1940         btrfs_free_path(path);
1941         return err;
1942 }
1943
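     /*
      * Apply one delayed data ref.  A ref that still owns its reserved
      * allocation (insert_reserved) creates the extent item itself;
      * otherwise the ref just increments or drops the reference count.
      */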
1944 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1945                                 struct btrfs_root *root,
1946                                 struct btrfs_delayed_ref_node *node,
1947                                 struct btrfs_delayed_extent_op *extent_op,
1948                                 int insert_reserved)
1949 {
1950         int ret = 0;
1951         struct btrfs_delayed_data_ref *ref;
1952         struct btrfs_key ins;
1953         u64 parent = 0;
1954         u64 ref_root = 0;
1955         u64 flags = 0;
1956
1957         ins.objectid = node->bytenr;
1958         ins.offset = node->num_bytes;
1959         ins.type = BTRFS_EXTENT_ITEM_KEY;
1960
1961         ref = btrfs_delayed_node_to_data_ref(node);
1962         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1963                 parent = ref->parent;
1964         else
1965                 ref_root = ref->root;
1966
1967         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1968                 if (extent_op) {
1969                         BUG_ON(extent_op->update_key);
1970                         flags |= extent_op->flags_to_set;
1971                 }
1972                 ret = alloc_reserved_file_extent(trans, root,
1973                                                  parent, ref_root, flags,
1974                                                  ref->objectid, ref->offset,
1975                                                  &ins, node->ref_mod);
1976         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1977                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1978                                              node->num_bytes, parent,
1979                                              ref_root, ref->objectid,
1980                                              ref->offset, node->ref_mod,
1981                                              extent_op);
1982         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1983                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1984                                           node->num_bytes, parent,
1985                                           ref_root, ref->objectid,
1986                                           ref->offset, node->ref_mod,
1987                                           extent_op);
1988         } else {
1989                 BUG();
1990         }
1991         return ret;
1992 }
1993
1994 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1995                                     struct extent_buffer *leaf,
1996                                     struct btrfs_extent_item *ei)
1997 {
1998         u64 flags = btrfs_extent_flags(leaf, ei);
1999         if (extent_op->update_flags) {
2000                 flags |= extent_op->flags_to_set;
2001                 btrfs_set_extent_flags(leaf, ei, flags);
2002         }
2003
2004         if (extent_op->update_key) {
2005                 struct btrfs_tree_block_info *bi;
2006                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2007                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2008                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2009         }
2010 }
2011
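     /*
      * Apply a standalone delayed extent op (flag and/or key update) to
      * the extent item it was recorded against.
      */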
2012 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2013                                  struct btrfs_root *root,
2014                                  struct btrfs_delayed_ref_node *node,
2015                                  struct btrfs_delayed_extent_op *extent_op)
2016 {
2017         struct btrfs_key key;
2018         struct btrfs_path *path;
2019         struct btrfs_extent_item *ei;
2020         struct extent_buffer *leaf;
2021         u32 item_size;
2022         int ret;
2023         int err = 0;
2024
2025         if (trans->aborted)
2026                 return 0;
2027
2028         path = btrfs_alloc_path();
2029         if (!path)
2030                 return -ENOMEM;
2031
2032         key.objectid = node->bytenr;
2033         key.type = BTRFS_EXTENT_ITEM_KEY;
2034         key.offset = node->num_bytes;
2035
2036         path->reada = 1;
2037         path->leave_spinning = 1;
2038         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2039                                 path, 0, 1);
2040         if (ret < 0) {
2041                 err = ret;
2042                 goto out;
2043         }
2044         if (ret > 0) {
2045                 err = -EIO;
2046                 goto out;
2047         }
2048
2049         leaf = path->nodes[0];
2050         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2051 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2052         if (item_size < sizeof(*ei)) {
2053                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2054                                              path, (u64)-1, 0);
2055                 if (ret < 0) {
2056                         err = ret;
2057                         goto out;
2058                 }
2059                 leaf = path->nodes[0];
2060                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2061         }
2062 #endif
2063         BUG_ON(item_size < sizeof(*ei));
2064         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2065         __run_delayed_extent_op(extent_op, leaf, ei);
2066
2067         btrfs_mark_buffer_dirty(leaf);
2068 out:
2069         btrfs_free_path(path);
2070         return err;
2071 }
2072
2073 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2074                                 struct btrfs_root *root,
2075                                 struct btrfs_delayed_ref_node *node,
2076                                 struct btrfs_delayed_extent_op *extent_op,
2077                                 int insert_reserved)
2078 {
2079         int ret = 0;
2080         struct btrfs_delayed_tree_ref *ref;
2081         struct btrfs_key ins;
2082         u64 parent = 0;
2083         u64 ref_root = 0;
2084
2085         ins.objectid = node->bytenr;
2086         ins.offset = node->num_bytes;
2087         ins.type = BTRFS_EXTENT_ITEM_KEY;
2088
2089         ref = btrfs_delayed_node_to_tree_ref(node);
2090         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2091                 parent = ref->parent;
2092         else
2093                 ref_root = ref->root;
2094
2095         BUG_ON(node->ref_mod != 1);
2096         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2097                 BUG_ON(!extent_op || !extent_op->update_flags ||
2098                        !extent_op->update_key);
2099                 ret = alloc_reserved_tree_block(trans, root,
2100                                                 parent, ref_root,
2101                                                 extent_op->flags_to_set,
2102                                                 &extent_op->key,
2103                                                 ref->level, &ins);
2104         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2105                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2106                                              node->num_bytes, parent, ref_root,
2107                                              ref->level, 0, 1, extent_op);
2108         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2109                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2110                                           node->num_bytes, parent, ref_root,
2111                                           ref->level, 0, 1, extent_op);
2112         } else {
2113                 BUG();
2114         }
2115         return ret;
2116 }
2117
2118 /* helper function to actually process a single delayed ref entry */
2119 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2120                                struct btrfs_root *root,
2121                                struct btrfs_delayed_ref_node *node,
2122                                struct btrfs_delayed_extent_op *extent_op,
2123                                int insert_reserved)
2124 {
2125         int ret = 0;
2126
2127         if (trans->aborted)
2128                 return 0;
2129
2130         if (btrfs_delayed_ref_is_head(node)) {
2131                 struct btrfs_delayed_ref_head *head;
2132                 /*
2133                  * we've hit the end of the chain and we were supposed
2134                  * to insert this extent into the tree.  But it got
2135                  * deleted before we ever needed to insert it, so all
2136                  * we have to do is clean up the accounting.
2137                  */
2138                 BUG_ON(extent_op);
2139                 head = btrfs_delayed_node_to_head(node);
2140                 if (insert_reserved) {
2141                         btrfs_pin_extent(root, node->bytenr,
2142                                          node->num_bytes, 1);
2143                         if (head->is_data) {
2144                                 ret = btrfs_del_csums(trans, root,
2145                                                       node->bytenr,
2146                                                       node->num_bytes);
2147                         }
2148                 }
2149                 mutex_unlock(&head->mutex);
2150                 return ret;
2151         }
2152
2153         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2154             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2155                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2156                                            insert_reserved);
2157         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2158                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2159                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2160                                            insert_reserved);
2161         else
2162                 BUG();
2163         return ret;
2164 }
2165
2166 static noinline struct btrfs_delayed_ref_node *
2167 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2168 {
2169         struct rb_node *node;
2170         struct btrfs_delayed_ref_node *ref;
2171         int action = BTRFS_ADD_DELAYED_REF;
2172 again:
2173         /*
2174          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2175          * This prevents the ref count from going down to zero while
2176          * there are still pending delayed refs.
2177          */
2178         node = rb_prev(&head->node.rb_node);
2179         while (1) {
2180                 if (!node)
2181                         break;
2182                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2183                                 rb_node);
2184                 if (ref->bytenr != head->node.bytenr)
2185                         break;
2186                 if (ref->action == action)
2187                         return ref;
2188                 node = rb_prev(node);
2189         }
2190         if (action == BTRFS_ADD_DELAYED_REF) {
2191                 action = BTRFS_DROP_DELAYED_REF;
2192                 goto again;
2193         }
2194         return NULL;
2195 }
2196
2197 /*
2198  * Returns the number of refs processed, or 0 if called with an already
2199  * aborted transaction.  Returns -ENOMEM or -EIO on failure and aborts.
2200  */
2201 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2202                                        struct btrfs_root *root,
2203                                        struct list_head *cluster)
2204 {
2205         struct btrfs_delayed_ref_root *delayed_refs;
2206         struct btrfs_delayed_ref_node *ref;
2207         struct btrfs_delayed_ref_head *locked_ref = NULL;
2208         struct btrfs_delayed_extent_op *extent_op;
2209         struct btrfs_fs_info *fs_info = root->fs_info;
2210         int ret;
2211         int count = 0;
2212         int must_insert_reserved = 0;
2213
2214         delayed_refs = &trans->transaction->delayed_refs;
2215         while (1) {
2216                 if (!locked_ref) {
2217                         /* pick a new head ref from the cluster list */
2218                         if (list_empty(cluster))
2219                                 break;
2220
2221                         locked_ref = list_entry(cluster->next,
2222                                      struct btrfs_delayed_ref_head, cluster);
2223
2224                         /* grab the lock that says we are going to process
2225                          * all the refs for this head */
2226                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2227
2228                         /*
2229                          * we may have dropped the spin lock to get the head
2230                          * mutex lock, and that might have given someone else
2231                          * time to free the head.  If that's true, it has been
2232                          * removed from our list and we can move on.
2233                          */
2234                         if (ret == -EAGAIN) {
2235                                 locked_ref = NULL;
2236                                 count++;
2237                                 continue;
2238                         }
2239                 }
2240
2241                 /*
2242                  * We need to try to merge add/drops of the same ref since we
2243                  * can run into issues with relocate dropping the implicit ref
2244                  * and then it being added back again before the drop can
2245                  * finish.  If we merged anything we need to re-loop so we can
2246                  * get a good ref.
2247                  */
2248                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2249                                          locked_ref);
2250
2251                 /*
2252                  * locked_ref is the head node, so we have to go one
2253                  * node back for any delayed ref updates
2254                  */
2255                 ref = select_delayed_ref(locked_ref);
2256
2257                 if (ref && ref->seq &&
2258                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2259                         /*
2260                          * there are still refs with lower seq numbers in the
2261                          * process of being added. Don't run this ref yet.
2262                          */
2263                         list_del_init(&locked_ref->cluster);
2264                         mutex_unlock(&locked_ref->mutex);
2265                         locked_ref = NULL;
2266                         delayed_refs->num_heads_ready++;
2267                         spin_unlock(&delayed_refs->lock);
2268                         cond_resched();
2269                         spin_lock(&delayed_refs->lock);
2270                         continue;
2271                 }
2272
2273                 /*
2274                  * record the must insert reserved flag before we
2275                  * drop the spin lock.
2276                  */
2277                 must_insert_reserved = locked_ref->must_insert_reserved;
2278                 locked_ref->must_insert_reserved = 0;
2279
2280                 extent_op = locked_ref->extent_op;
2281                 locked_ref->extent_op = NULL;
2282
2283                 if (!ref) {
2284                         /* All delayed refs have been processed.  Go ahead
2285                          * and send the head node to run_one_delayed_ref,
2286                          * so that any accounting fixes can happen.
2287                          */
2288                         ref = &locked_ref->node;
2289
2290                         if (extent_op && must_insert_reserved) {
2291                                 kfree(extent_op);
2292                                 extent_op = NULL;
2293                         }
2294
2295                         if (extent_op) {
2296                                 spin_unlock(&delayed_refs->lock);
2297
2298                                 ret = run_delayed_extent_op(trans, root,
2299                                                             ref, extent_op);
2300                                 kfree(extent_op);
2301
2302                                 if (ret) {
2303                                         list_del_init(&locked_ref->cluster);
2304                                         mutex_unlock(&locked_ref->mutex);
2305
2306                                         printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2307                                         spin_lock(&delayed_refs->lock);
2308                                         return ret;
2309                                 }
2310
2311                                 goto next;
2312                         }
2313
2314                         list_del_init(&locked_ref->cluster);
2315                         locked_ref = NULL;
2316                 }
2317
2318                 ref->in_tree = 0;
2319                 rb_erase(&ref->rb_node, &delayed_refs->root);
2320                 delayed_refs->num_entries--;
2321                 if (locked_ref) {
2322                         /*
2323                          * when we run the delayed ref, also correct the
2324                          * ref_mod on the head
2325                          */
2326                         switch (ref->action) {
2327                         case BTRFS_ADD_DELAYED_REF:
2328                         case BTRFS_ADD_DELAYED_EXTENT:
2329                                 locked_ref->node.ref_mod -= ref->ref_mod;
2330                                 break;
2331                         case BTRFS_DROP_DELAYED_REF:
2332                                 locked_ref->node.ref_mod += ref->ref_mod;
2333                                 break;
2334                         default:
2335                                 WARN_ON(1);
2336                         }
2337                 }
2338                 spin_unlock(&delayed_refs->lock);
2339
2340                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2341                                           must_insert_reserved);
2342
2343                 btrfs_put_delayed_ref(ref);
2344                 kfree(extent_op);
2345                 count++;
2346
2347                 if (ret) {
2348                         if (locked_ref) {
2349                                 list_del_init(&locked_ref->cluster);
2350                                 mutex_unlock(&locked_ref->mutex);
2351                         }
2352                         printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2353                         spin_lock(&delayed_refs->lock);
2354                         return ret;
2355                 }
2356
2357 next:
2358                 cond_resched();
2359                 spin_lock(&delayed_refs->lock);
2360         }
2361         return count;
2362 }
2363
2364 #ifdef SCRAMBLE_DELAYED_REFS
2365 /*
2366  * Normally delayed refs get processed in ascending bytenr order. This
2367  * correlates in most cases to the order added. To expose dependencies on this
2368  * order, we start to process the tree in the middle instead of the beginning
2369  */
2370 static u64 find_middle(struct rb_root *root)
2371 {
2372         struct rb_node *n = root->rb_node;
2373         struct btrfs_delayed_ref_node *entry;
2374         int alt = 1;
2375         u64 middle;
2376         u64 first = 0, last = 0;
2377
2378         n = rb_first(root);
2379         if (n) {
2380                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2381                 first = entry->bytenr;
2382         }
2383         n = rb_last(root);
2384         if (n) {
2385                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2386                 last = entry->bytenr;
2387         }
2388         n = root->rb_node;
2389
2390         while (n) {
2391                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2392                 WARN_ON(!entry->in_tree);
2393
2394                 middle = entry->bytenr;
2395
2396                 if (alt)
2397                         n = n->rb_left;
2398                 else
2399                         n = n->rb_right;
2400
2401                 alt = 1 - alt;
2402         }
2403         return middle;
2404 }
2405 #endif
2406
2407 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2408                                          struct btrfs_fs_info *fs_info)
2409 {
2410         struct qgroup_update *qgroup_update;
2411         int ret = 0;
2412
2413         if (list_empty(&trans->qgroup_ref_list) !=
2414             !trans->delayed_ref_elem.seq) {
2415                 /* list without seq or seq without list */
2416                 printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2417                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2418                         trans->delayed_ref_elem.seq);
2419                 BUG();
2420         }
2421
2422         if (!trans->delayed_ref_elem.seq)
2423                 return 0;
2424
2425         while (!list_empty(&trans->qgroup_ref_list)) {
2426                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2427                                                  struct qgroup_update, list);
2428                 list_del(&qgroup_update->list);
2429                 if (!ret)
2430                         ret = btrfs_qgroup_account_ref(
2431                                         trans, fs_info, qgroup_update->node,
2432                                         qgroup_update->extent_op);
2433                 kfree(qgroup_update);
2434         }
2435
2436         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2437
2438         return ret;
2439 }
2440
2441 /*
2442  * this starts processing the delayed reference count updates and
2443  * extent insertions we have queued up so far.  count can be
2444  * 0, which means to process everything in the tree at the start
2445  * of the run (but not newly added entries), or it can be some target
2446  * number you'd like to process.
2447  *
2448  * Returns 0 on success or if called with an aborted transaction
2449  * Returns <0 on error and aborts the transaction
2450  */
2451 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2452                            struct btrfs_root *root, unsigned long count)
2453 {
2454         struct rb_node *node;
2455         struct btrfs_delayed_ref_root *delayed_refs;
2456         struct btrfs_delayed_ref_node *ref;
2457         struct list_head cluster;
2458         int ret;
2459         u64 delayed_start;
2460         int run_all = count == (unsigned long)-1;
2461         int run_most = 0;
2462         int loops;
2463
2464         /* We'll clean this up in btrfs_cleanup_transaction */
2465         if (trans->aborted)
2466                 return 0;
2467
2468         if (root == root->fs_info->extent_root)
2469                 root = root->fs_info->tree_root;
2470
2471         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2472
2473         delayed_refs = &trans->transaction->delayed_refs;
2474         INIT_LIST_HEAD(&cluster);
2475 again:
2476         loops = 0;
2477         spin_lock(&delayed_refs->lock);
2478
2479 #ifdef SCRAMBLE_DELAYED_REFS
2480         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2481 #endif
2482
2483         if (count == 0) {
2484                 count = delayed_refs->num_entries * 2;
2485                 run_most = 1;
2486         }
2487         while (1) {
2488                 if (!(run_all || run_most) &&
2489                     delayed_refs->num_heads_ready < 64)
2490                         break;
2491
2492                 /*
2493                  * go find something we can process in the rbtree.  We start at
2494                  * the beginning of the tree, and then build a cluster
2495                  * of refs to process starting at the first one we are able to
2496                  * lock
2497                  */
2498                 delayed_start = delayed_refs->run_delayed_start;
2499                 ret = btrfs_find_ref_cluster(trans, &cluster,
2500                                              delayed_refs->run_delayed_start);
2501                 if (ret)
2502                         break;
2503
2504                 ret = run_clustered_refs(trans, root, &cluster);
2505                 if (ret < 0) {
2506                         spin_unlock(&delayed_refs->lock);
2507                         btrfs_abort_transaction(trans, root, ret);
2508                         return ret;
2509                 }
2510
2511                 count -= min_t(unsigned long, ret, count);
2512
2513                 if (count == 0)
2514                         break;
2515
2516                 if (delayed_start >= delayed_refs->run_delayed_start) {
2517                         if (loops == 0) {
2518                                 /*
2519                                  * btrfs_find_ref_cluster looped.  Let's do
2520                                  * one more cycle; if we don't run any
2521                                  * delayed refs during that cycle (because
2522                                  * all of them are blocked), bail out.
2523                                  */
2524                                 loops = 1;
2525                         } else {
2526                                 /*
2527                                  * no runnable refs left, stop trying
2528                                  */
2529                                 BUG_ON(run_all);
2530                                 break;
2531                         }
2532                 }
2533                 if (ret) {
2534                         /* refs were run, let's reset staleness detection */
2535                         loops = 0;
2536                 }
2537         }
2538
2539         if (run_all) {
2540                 if (!list_empty(&trans->new_bgs)) {
2541                         spin_unlock(&delayed_refs->lock);
2542                         btrfs_create_pending_block_groups(trans, root);
2543                         spin_lock(&delayed_refs->lock);
2544                 }
2545
2546                 node = rb_first(&delayed_refs->root);
2547                 if (!node)
2548                         goto out;
2549                 count = (unsigned long)-1;
2550
2551                 while (node) {
2552                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2553                                        rb_node);
2554                         if (btrfs_delayed_ref_is_head(ref)) {
2555                                 struct btrfs_delayed_ref_head *head;
2556
2557                                 head = btrfs_delayed_node_to_head(ref);
2558                                 atomic_inc(&ref->refs);
2559
2560                                 spin_unlock(&delayed_refs->lock);
2561                                 /*
2562                                  * Mutex was contended, block until it's
2563                                  * released and try again
2564                                  */
2565                                 mutex_lock(&head->mutex);
2566                                 mutex_unlock(&head->mutex);
2567
2568                                 btrfs_put_delayed_ref(ref);
2569                                 cond_resched();
2570                                 goto again;
2571                         }
2572                         node = rb_next(node);
2573                 }
2574                 spin_unlock(&delayed_refs->lock);
2575                 schedule_timeout(1);
2576                 goto again;
2577         }
2578 out:
2579         spin_unlock(&delayed_refs->lock);
2580         assert_qgroups_uptodate(trans);
2581         return 0;
2582 }
2583
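     /*
      * Queue a delayed extent op that ORs @flags into the flags of the
      * on-disk extent item; the update is applied when delayed refs run.
      */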
2584 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2585                                 struct btrfs_root *root,
2586                                 u64 bytenr, u64 num_bytes, u64 flags,
2587                                 int is_data)
2588 {
2589         struct btrfs_delayed_extent_op *extent_op;
2590         int ret;
2591
2592         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2593         if (!extent_op)
2594                 return -ENOMEM;
2595
2596         extent_op->flags_to_set = flags;
2597         extent_op->update_flags = 1;
2598         extent_op->update_key = 0;
2599         extent_op->is_data = is_data ? 1 : 0;
2600
2601         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2602                                           num_bytes, extent_op);
2603         if (ret)
2604                 kfree(extent_op);
2605         return ret;
2606 }
2607
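     /*
      * Check the delayed refs queued for @bytenr.  Returns 0 when the
      * only pending ref belongs to (@root, @objectid, @offset), 1 when
      * some other reference is pending, -ENOENT when nothing is queued,
      * and -EAGAIN when the head mutex was contended and the caller
      * should retry.
      */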
2608 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2609                                       struct btrfs_root *root,
2610                                       struct btrfs_path *path,
2611                                       u64 objectid, u64 offset, u64 bytenr)
2612 {
2613         struct btrfs_delayed_ref_head *head;
2614         struct btrfs_delayed_ref_node *ref;
2615         struct btrfs_delayed_data_ref *data_ref;
2616         struct btrfs_delayed_ref_root *delayed_refs;
2617         struct rb_node *node;
2618         int ret = 0;
2619
2620         ret = -ENOENT;
2621         delayed_refs = &trans->transaction->delayed_refs;
2622         spin_lock(&delayed_refs->lock);
2623         head = btrfs_find_delayed_ref_head(trans, bytenr);
2624         if (!head)
2625                 goto out;
2626
2627         if (!mutex_trylock(&head->mutex)) {
2628                 atomic_inc(&head->node.refs);
2629                 spin_unlock(&delayed_refs->lock);
2630
2631                 btrfs_release_path(path);
2632
2633                 /*
2634                  * Mutex was contended, block until it's released and let
2635                  * caller try again
2636                  */
2637                 mutex_lock(&head->mutex);
2638                 mutex_unlock(&head->mutex);
2639                 btrfs_put_delayed_ref(&head->node);
2640                 return -EAGAIN;
2641         }
2642
2643         node = rb_prev(&head->node.rb_node);
2644         if (!node)
2645                 goto out_unlock;
2646
2647         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2648
2649         if (ref->bytenr != bytenr)
2650                 goto out_unlock;
2651
2652         ret = 1;
2653         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2654                 goto out_unlock;
2655
2656         data_ref = btrfs_delayed_node_to_data_ref(ref);
2657
2658         node = rb_prev(node);
2659         if (node) {
2660                 int seq = ref->seq;
2661
2662                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2663                 if (ref->bytenr == bytenr && ref->seq == seq)
2664                         goto out_unlock;
2665         }
2666
2667         if (data_ref->root != root->root_key.objectid ||
2668             data_ref->objectid != objectid || data_ref->offset != offset)
2669                 goto out_unlock;
2670
2671         ret = 0;
2672 out_unlock:
2673         mutex_unlock(&head->mutex);
2674 out:
2675         spin_unlock(&delayed_refs->lock);
2676         return ret;
2677 }
2678
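     /*
      * Check the committed extent tree for cross references.  Returns 0
      * only when the extent carries a single inline data ref owned by
      * (@root, @objectid, @offset) and its generation is newer than the
      * last snapshot of @root; returns 1 when the extent may be shared
      * and -ENOENT when no extent item exists for @bytenr.
      */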
2679 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2680                                         struct btrfs_root *root,
2681                                         struct btrfs_path *path,
2682                                         u64 objectid, u64 offset, u64 bytenr)
2683 {
2684         struct btrfs_root *extent_root = root->fs_info->extent_root;
2685         struct extent_buffer *leaf;
2686         struct btrfs_extent_data_ref *ref;
2687         struct btrfs_extent_inline_ref *iref;
2688         struct btrfs_extent_item *ei;
2689         struct btrfs_key key;
2690         u32 item_size;
2691         int ret;
2692
2693         key.objectid = bytenr;
2694         key.offset = (u64)-1;
2695         key.type = BTRFS_EXTENT_ITEM_KEY;
2696
2697         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2698         if (ret < 0)
2699                 goto out;
2700         BUG_ON(ret == 0); /* Corruption */
2701
2702         ret = -ENOENT;
2703         if (path->slots[0] == 0)
2704                 goto out;
2705
2706         path->slots[0]--;
2707         leaf = path->nodes[0];
2708         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2709
2710         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2711                 goto out;
2712
2713         ret = 1;
2714         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2715 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2716         if (item_size < sizeof(*ei)) {
2717                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2718                 goto out;
2719         }
2720 #endif
2721         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2722
2723         if (item_size != sizeof(*ei) +
2724             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2725                 goto out;
2726
2727         if (btrfs_extent_generation(leaf, ei) <=
2728             btrfs_root_last_snapshot(&root->root_item))
2729                 goto out;
2730
2731         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2732         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2733             BTRFS_EXTENT_DATA_REF_KEY)
2734                 goto out;
2735
2736         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2737         if (btrfs_extent_refs(leaf, ei) !=
2738             btrfs_extent_data_ref_count(leaf, ref) ||
2739             btrfs_extent_data_ref_root(leaf, ref) !=
2740             root->root_key.objectid ||
2741             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2742             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2743                 goto out;
2744
2745         ret = 0;
2746 out:
2747         return ret;
2748 }
2749
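/*
 * Test whether anything other than this root/objectid/offset references
 * the data extent at @bytenr, consulting both the committed extent tree
 * and the in-memory delayed ref queue.  check_delayed_ref() returns
 * -EAGAIN when it had to drop the path to wait on the ref head mutex, in
 * which case the pair of checks is simply retried.  Returns 0 when the
 * extent is exclusively owned, and nonzero (a cross reference or an
 * error) otherwise.
 */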
2750 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2751                           struct btrfs_root *root,
2752                           u64 objectid, u64 offset, u64 bytenr)
2753 {
2754         struct btrfs_path *path;
2755         int ret;
2756         int ret2;
2757
2758         path = btrfs_alloc_path();
2759         if (!path)
2760                 return -ENOMEM;
2761
2762         do {
2763                 ret = check_committed_ref(trans, root, path, objectid,
2764                                           offset, bytenr);
2765                 if (ret && ret != -ENOENT)
2766                         goto out;
2767
2768                 ret2 = check_delayed_ref(trans, root, path, objectid,
2769                                          offset, bytenr);
2770         } while (ret2 == -EAGAIN);
2771
2772         if (ret2 && ret2 != -ENOENT) {
2773                 ret = ret2;
2774                 goto out;
2775         }
2776
2777         if (ret != -ENOENT || ret2 != -ENOENT)
2778                 ret = 0;
2779 out:
2780         btrfs_free_path(path);
2781         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2782                 WARN_ON(ret > 0);
2783         return ret;
2784 }
2785
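/*
 * Add or drop one reference for every extent that @buf points to: the
 * disk bytenrs of file extent items when @buf is a leaf, child block
 * pointers when it is a node.  @inc picks btrfs_inc_extent_ref or
 * btrfs_free_extent, and @full_backref records the refs against this
 * buffer's bytenr instead of the owning root.
 */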
2786 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2787                            struct btrfs_root *root,
2788                            struct extent_buffer *buf,
2789                            int full_backref, int inc, int for_cow)
2790 {
2791         u64 bytenr;
2792         u64 num_bytes;
2793         u64 parent;
2794         u64 ref_root;
2795         u32 nritems;
2796         struct btrfs_key key;
2797         struct btrfs_file_extent_item *fi;
2798         int i;
2799         int level;
2800         int ret = 0;
2801         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2802                             u64, u64, u64, u64, u64, u64, int);
2803
2804         ref_root = btrfs_header_owner(buf);
2805         nritems = btrfs_header_nritems(buf);
2806         level = btrfs_header_level(buf);
2807
2808         if (!root->ref_cows && level == 0)
2809                 return 0;
2810
2811         if (inc)
2812                 process_func = btrfs_inc_extent_ref;
2813         else
2814                 process_func = btrfs_free_extent;
2815
2816         if (full_backref)
2817                 parent = buf->start;
2818         else
2819                 parent = 0;
2820
2821         for (i = 0; i < nritems; i++) {
2822                 if (level == 0) {
2823                         btrfs_item_key_to_cpu(buf, &key, i);
2824                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2825                                 continue;
2826                         fi = btrfs_item_ptr(buf, i,
2827                                             struct btrfs_file_extent_item);
2828                         if (btrfs_file_extent_type(buf, fi) ==
2829                             BTRFS_FILE_EXTENT_INLINE)
2830                                 continue;
2831                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2832                         if (bytenr == 0)
2833                                 continue;
2834
2835                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2836                         key.offset -= btrfs_file_extent_offset(buf, fi);
2837                         ret = process_func(trans, root, bytenr, num_bytes,
2838                                            parent, ref_root, key.objectid,
2839                                            key.offset, for_cow);
2840                         if (ret)
2841                                 goto fail;
2842                 } else {
2843                         bytenr = btrfs_node_blockptr(buf, i);
2844                         num_bytes = btrfs_level_size(root, level - 1);
2845                         ret = process_func(trans, root, bytenr, num_bytes,
2846                                            parent, ref_root, level - 1, 0,
2847                                            for_cow);
2848                         if (ret)
2849                                 goto fail;
2850                 }
2851         }
2852         return 0;
2853 fail:
2854         return ret;
2855 }
2856
2857 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2858                   struct extent_buffer *buf, int full_backref, int for_cow)
2859 {
2860         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2861 }
2862
2863 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2864                   struct extent_buffer *buf, int full_backref, int for_cow)
2865 {
2866         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2867 }
2868
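/*
 * Copy the in-memory block group item for @cache over its counterpart in
 * the extent tree.  Any error here is unrecoverable and aborts the
 * transaction.
 */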
2869 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2870                                  struct btrfs_root *root,
2871                                  struct btrfs_path *path,
2872                                  struct btrfs_block_group_cache *cache)
2873 {
2874         int ret;
2875         struct btrfs_root *extent_root = root->fs_info->extent_root;
2876         unsigned long bi;
2877         struct extent_buffer *leaf;
2878
2879         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2880         if (ret < 0)
2881                 goto fail;
2882         BUG_ON(ret); /* Corruption */
2883
2884         leaf = path->nodes[0];
2885         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2886         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2887         btrfs_mark_buffer_dirty(leaf);
2888         btrfs_release_path(path);
2889 fail:
2890         if (ret) {
2891                 btrfs_abort_transaction(trans, root, ret);
2892                 return ret;
2893         }
2894         return 0;
2896 }
2897
2898 static struct btrfs_block_group_cache *
2899 next_block_group(struct btrfs_root *root,
2900                  struct btrfs_block_group_cache *cache)
2901 {
2902         struct rb_node *node;
2903         spin_lock(&root->fs_info->block_group_cache_lock);
2904         node = rb_next(&cache->cache_node);
2905         btrfs_put_block_group(cache);
2906         if (node) {
2907                 cache = rb_entry(node, struct btrfs_block_group_cache,
2908                                  cache_node);
2909                 btrfs_get_block_group(cache);
2910         } else
2911                 cache = NULL;
2912         spin_unlock(&root->fs_info->block_group_cache_lock);
2913         return cache;
2914 }
2915
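/*
 * Get the free space cache inode for @block_group ready for this commit:
 * create the inode if it doesn't exist yet, zero the generation and
 * truncate any stale contents, then preallocate room for the new cache.
 * The outcome is recorded in block_group->disk_cache_state.
 */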
2916 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2917                             struct btrfs_trans_handle *trans,
2918                             struct btrfs_path *path)
2919 {
2920         struct btrfs_root *root = block_group->fs_info->tree_root;
2921         struct inode *inode = NULL;
2922         u64 alloc_hint = 0;
2923         int dcs = BTRFS_DC_ERROR;
2924         int num_pages = 0;
2925         int retries = 0;
2926         int ret = 0;
2927
2928         /*
2929          * If this block group is smaller than 100MB, don't bother caching
2930          * the block group.
2931          */
2932         if (block_group->key.offset < (100 * 1024 * 1024)) {
2933                 spin_lock(&block_group->lock);
2934                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2935                 spin_unlock(&block_group->lock);
2936                 return 0;
2937         }
2938
2939 again:
2940         inode = lookup_free_space_inode(root, block_group, path);
2941         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2942                 ret = PTR_ERR(inode);
2943                 btrfs_release_path(path);
2944                 goto out;
2945         }
2946
2947         if (IS_ERR(inode)) {
2948                 BUG_ON(retries);
2949                 retries++;
2950
2951                 if (block_group->ro)
2952                         goto out_free;
2953
2954                 ret = create_free_space_inode(root, trans, block_group, path);
2955                 if (ret)
2956                         goto out_free;
2957                 goto again;
2958         }
2959
2960         /* We've already set up this transaction, go ahead and exit */
2961         if (block_group->cache_generation == trans->transid &&
2962             i_size_read(inode)) {
2963                 dcs = BTRFS_DC_SETUP;
2964                 goto out_put;
2965         }
2966
2967         /*
2968          * We want to set the generation to 0 so that if anything goes wrong
2969          * from here on out we know not to trust this cache when we load it up
2970          * next time.
2971          */
2972         BTRFS_I(inode)->generation = 0;
2973         ret = btrfs_update_inode(trans, root, inode);
2974         WARN_ON(ret);
2975
2976         if (i_size_read(inode) > 0) {
2977                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2978                                                       inode);
2979                 if (ret)
2980                         goto out_put;
2981         }
2982
2983         spin_lock(&block_group->lock);
2984         if (block_group->cached != BTRFS_CACHE_FINISHED ||
2985             !btrfs_test_opt(root, SPACE_CACHE)) {
2986                 /*
2987                  * don't bother trying to write stuff out _if_
2988                  * a) we're not cached, or
2989                  * b) we were mounted with the nospace_cache option.
2990                  */
2991                 dcs = BTRFS_DC_WRITTEN;
2992                 spin_unlock(&block_group->lock);
2993                 goto out_put;
2994         }
2995         spin_unlock(&block_group->lock);
2996
2997         /*
2998          * Try to preallocate enough space based on how big the block group is.
2999          * Keep in mind this has to include any pinned space which could end up
3000          * taking up quite a bit since it's not folded into the other space
3001          * cache.
3002          */
3003         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3004         if (!num_pages)
3005                 num_pages = 1;
3006
3007         num_pages *= 16;
3008         num_pages *= PAGE_CACHE_SIZE;
3009
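        /*
         * Example with 4KB pages: a 1GB block group gives
         * div64_u64(1GB, 256MB) = 4, so 4 * 16 pages * 4096 bytes = 256KB
         * is preallocated (note num_pages holds bytes from here on).
         */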
3010         ret = btrfs_check_data_free_space(inode, num_pages);
3011         if (ret)
3012                 goto out_put;
3013
3014         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3015                                               num_pages, num_pages,
3016                                               &alloc_hint);
3017         if (!ret)
3018                 dcs = BTRFS_DC_SETUP;
3019         btrfs_free_reserved_data_space(inode, num_pages);
3020
3021 out_put:
3022         iput(inode);
3023 out_free:
3024         btrfs_release_path(path);
3025 out:
3026         spin_lock(&block_group->lock);
3027         if (!ret && dcs == BTRFS_DC_SETUP)
3028                 block_group->cache_generation = trans->transid;
3029         block_group->disk_cache_state = dcs;
3030         spin_unlock(&block_group->lock);
3031
3032         return ret;
3033 }
3034
3035 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3036                                    struct btrfs_root *root)
3037 {
3038         struct btrfs_block_group_cache *cache;
3039         int err = 0;
3040         struct btrfs_path *path;
3041         u64 last = 0;
3042
3043         path = btrfs_alloc_path();
3044         if (!path)
3045                 return -ENOMEM;
3046
3047 again:
3048         while (1) {
3049                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3050                 while (cache) {
3051                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3052                                 break;
3053                         cache = next_block_group(root, cache);
3054                 }
3055                 if (!cache) {
3056                         if (last == 0)
3057                                 break;
3058                         last = 0;
3059                         continue;
3060                 }
3061                 err = cache_save_setup(cache, trans, path);
3062                 last = cache->key.objectid + cache->key.offset;
3063                 btrfs_put_block_group(cache);
3064         }
3065
3066         while (1) {
3067                 if (last == 0) {
3068                         err = btrfs_run_delayed_refs(trans, root,
3069                                                      (unsigned long)-1);
3070                         if (err) /* File system offline */
3071                                 goto out;
3072                 }
3073
3074                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3075                 while (cache) {
3076                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3077                                 btrfs_put_block_group(cache);
3078                                 goto again;
3079                         }
3080
3081                         if (cache->dirty)
3082                                 break;
3083                         cache = next_block_group(root, cache);
3084                 }
3085                 if (!cache) {
3086                         if (last == 0)
3087                                 break;
3088                         last = 0;
3089                         continue;
3090                 }
3091
3092                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3093                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3094                 cache->dirty = 0;
3095                 last = cache->key.objectid + cache->key.offset;
3096
3097                 err = write_one_cache_group(trans, root, path, cache);
3098                 if (err) /* File system offline */
3099                         goto out;
3100
3101                 btrfs_put_block_group(cache);
3102         }
3103
3104         while (1) {
3105                 /*
3106                  * I don't think this is needed since we're just marking our
3107                  * preallocated extent as written, but just in case, it
3108                  * can't hurt.
3109                  */
3110                 if (last == 0) {
3111                         err = btrfs_run_delayed_refs(trans, root,
3112                                                      (unsigned long)-1);
3113                         if (err) /* File system offline */
3114                                 goto out;
3115                 }
3116
3117                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3118                 while (cache) {
3119                         /*
3120                          * Really this shouldn't happen, but it could if we
3121                          * couldn't write the entire preallocated extent and
3122                          * splitting the extent resulted in a new block.
3123                          */
3124                         if (cache->dirty) {
3125                                 btrfs_put_block_group(cache);
3126                                 goto again;
3127                         }
3128                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3129                                 break;
3130                         cache = next_block_group(root, cache);
3131                 }
3132                 if (!cache) {
3133                         if (last == 0)
3134                                 break;
3135                         last = 0;
3136                         continue;
3137                 }
3138
3139                 err = btrfs_write_out_cache(root, trans, cache, path);
3140
3141                 /*
3142                  * If we didn't have an error then the cache state is still
3143                  * NEED_WRITE, so we can set it to WRITTEN.
3144                  */
3145                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3146                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3147                 last = cache->key.objectid + cache->key.offset;
3148                 btrfs_put_block_group(cache);
3149         }
3150 out:
3152         btrfs_free_path(path);
3153         return err;
3154 }
3155
3156 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3157 {
3158         struct btrfs_block_group_cache *block_group;
3159         int readonly = 0;
3160
3161         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3162         if (!block_group || block_group->ro)
3163                 readonly = 1;
3164         if (block_group)
3165                 btrfs_put_block_group(block_group);
3166         return readonly;
3167 }
3168
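/*
 * Create, or add capacity to, the in-memory accounting for one class of
 * space.  DUP, RAID1 and RAID10 store two copies of every byte, so their
 * disk_total/disk_used counters run at twice the logical numbers.
 */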
3169 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3170                              u64 total_bytes, u64 bytes_used,
3171                              struct btrfs_space_info **space_info)
3172 {
3173         struct btrfs_space_info *found;
3174         int i;
3175         int factor;
3176
3177         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3178                      BTRFS_BLOCK_GROUP_RAID10))
3179                 factor = 2;
3180         else
3181                 factor = 1;
3182
3183         found = __find_space_info(info, flags);
3184         if (found) {
3185                 spin_lock(&found->lock);
3186                 found->total_bytes += total_bytes;
3187                 found->disk_total += total_bytes * factor;
3188                 found->bytes_used += bytes_used;
3189                 found->disk_used += bytes_used * factor;
3190                 found->full = 0;
3191                 spin_unlock(&found->lock);
3192                 *space_info = found;
3193                 return 0;
3194         }
3195         found = kzalloc(sizeof(*found), GFP_NOFS);
3196         if (!found)
3197                 return -ENOMEM;
3198
3199         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3200                 INIT_LIST_HEAD(&found->block_groups[i]);
3201         init_rwsem(&found->groups_sem);
3202         spin_lock_init(&found->lock);
3203         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3204         found->total_bytes = total_bytes;
3205         found->disk_total = total_bytes * factor;
3206         found->bytes_used = bytes_used;
3207         found->disk_used = bytes_used * factor;
3208         found->bytes_pinned = 0;
3209         found->bytes_reserved = 0;
3210         found->bytes_readonly = 0;
3211         found->bytes_may_use = 0;
3212         found->full = 0;
3213         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3214         found->chunk_alloc = 0;
3215         found->flush = 0;
3216         init_waitqueue_head(&found->wait);
3217         *space_info = found;
3218         list_add_rcu(&found->list, &info->space_info);
3219         if (flags & BTRFS_BLOCK_GROUP_DATA)
3220                 info->data_sinfo = found;
3221         return 0;
3222 }
3223
3224 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3225 {
3226         u64 extra_flags = chunk_to_extended(flags) &
3227                                 BTRFS_EXTENDED_PROFILE_MASK;
3228
3229         if (flags & BTRFS_BLOCK_GROUP_DATA)
3230                 fs_info->avail_data_alloc_bits |= extra_flags;
3231         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3232                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3233         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3234                 fs_info->avail_system_alloc_bits |= extra_flags;
3235 }
3236
3237 /*
3238  * returns target flags in extended format or 0 if restripe for this
3239  * chunk_type is not in progress
3240  *
3241  * should be called with either volume_mutex or balance_lock held
3242  */
3243 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3244 {
3245         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3246         u64 target = 0;
3247
3248         if (!bctl)
3249                 return 0;
3250
3251         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3252             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3253                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3254         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3255                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3256                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3257         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3258                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3259                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3260         }
3261
3262         return target;
3263 }
3264
3265 /*
3266  * @flags: available profiles in extended format (see ctree.h)
3267  *
3268  * Returns reduced profile in chunk format.  If profile changing is in
3269  * progress (either running or paused) picks the target profile (if it's
3270  * already available), otherwise falls back to plain reducing.
3271  */
3272 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3273 {
3274         /*
3275          * we add in the count of missing devices because we want
3276          * to make sure that any RAID levels on a degraded FS
3277          * continue to be honored.
3278          */
3279         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3280                 root->fs_info->fs_devices->missing_devices;
3281         u64 target;
3282         u64 tmp;
3283
3284         /*
3285          * see if restripe for this chunk_type is in progress, if so
3286          * try to reduce to the target profile
3287          */
3288         spin_lock(&root->fs_info->balance_lock);
3289         target = get_restripe_target(root->fs_info, flags);
3290         if (target) {
3291                 /* pick target profile only if it's already available */
3292                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3293                         spin_unlock(&root->fs_info->balance_lock);
3294                         return extended_to_chunk(target);
3295                 }
3296         }
3297         spin_unlock(&root->fs_info->balance_lock);
3298
3299         /* First, mask out the RAID levels which aren't possible */
3300         if (num_devices == 1)
3301                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3302                            BTRFS_BLOCK_GROUP_RAID5);
3303         if (num_devices < 3)
3304                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3305         if (num_devices < 4)
3306                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3307
3308         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3309                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3310                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3311         flags &= ~tmp;
3312
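        /*
         * More than one candidate profile may remain; keep only the
         * preferred one, in the order RAID6 > RAID5 > RAID10 > RAID1 >
         * RAID0 below.
         */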
3313         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3314                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3315         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3316                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3317         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3318                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3319         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3320                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3321         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3322                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3323
3324         return extended_to_chunk(flags | tmp);
3325 }
3326
3327 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3328 {
3329         if (flags & BTRFS_BLOCK_GROUP_DATA)
3330                 flags |= root->fs_info->avail_data_alloc_bits;
3331         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3332                 flags |= root->fs_info->avail_system_alloc_bits;
3333         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3334                 flags |= root->fs_info->avail_metadata_alloc_bits;
3335
3336         return btrfs_reduce_alloc_profile(root, flags);
3337 }
3338
3339 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3340 {
3341         u64 flags;
3342         u64 ret;
3343
3344         if (data)
3345                 flags = BTRFS_BLOCK_GROUP_DATA;
3346         else if (root == root->fs_info->chunk_root)
3347                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3348         else
3349                 flags = BTRFS_BLOCK_GROUP_METADATA;
3350
3351         ret = get_alloc_profile(root, flags);
3352         return ret;
3353 }
3354
3355 /*
3356  * This will check the space that the inode allocates from to make sure we have
3357  * enough space for @bytes.
3358  */
3359 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3360 {
3361         struct btrfs_space_info *data_sinfo;
3362         struct btrfs_root *root = BTRFS_I(inode)->root;
3363         struct btrfs_fs_info *fs_info = root->fs_info;
3364         u64 used;
3365         int ret = 0, committed = 0, alloc_chunk = 1;
3366
3367         /* make sure bytes are sectorsize aligned */
3368         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
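        /* e.g. with a 4KB sectorsize, a 5000 byte request rounds up to 8192 */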
3369
3370         if (root == root->fs_info->tree_root ||
3371             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3372                 alloc_chunk = 0;
3373                 committed = 1;
3374         }
3375
3376         data_sinfo = fs_info->data_sinfo;
3377         if (!data_sinfo)
3378                 goto alloc;
3379
3380 again:
3381         /* make sure we have enough space to handle the data first */
3382         spin_lock(&data_sinfo->lock);
3383         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3384                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3385                 data_sinfo->bytes_may_use;
3386
3387         if (used + bytes > data_sinfo->total_bytes) {
3388                 struct btrfs_trans_handle *trans;
3389
3390                 /*
3391                  * if we don't have enough free bytes in this space then we need
3392                  * to alloc a new chunk.
3393                  */
3394                 if (!data_sinfo->full && alloc_chunk) {
3395                         u64 alloc_target;
3396
3397                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3398                         spin_unlock(&data_sinfo->lock);
3399 alloc:
3400                         alloc_target = btrfs_get_alloc_profile(root, 1);
3401                         trans = btrfs_join_transaction(root);
3402                         if (IS_ERR(trans))
3403                                 return PTR_ERR(trans);
3404
3405                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3406                                              alloc_target,
3407                                              CHUNK_ALLOC_NO_FORCE);
3408                         btrfs_end_transaction(trans, root);
3409                         if (ret < 0) {
3410                                 if (ret != -ENOSPC)
3411                                         return ret;
3412                                 else
3413                                         goto commit_trans;
3414                         }
3415
3416                         if (!data_sinfo)
3417                                 data_sinfo = fs_info->data_sinfo;
3418
3419                         goto again;
3420                 }
3421
3422                 /*
3423                  * If we have less pinned bytes than we want to allocate then
3424                  * don't bother committing the transaction, it won't help us.
3425                  */
3426                 if (data_sinfo->bytes_pinned < bytes)
3427                         committed = 1;
3428                 spin_unlock(&data_sinfo->lock);
3429
3430                 /* commit the current transaction and try again */
3431 commit_trans:
3432                 if (!committed &&
3433                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3434                         committed = 1;
3435                         trans = btrfs_join_transaction(root);
3436                         if (IS_ERR(trans))
3437                                 return PTR_ERR(trans);
3438                         ret = btrfs_commit_transaction(trans, root);
3439                         if (ret)
3440                                 return ret;
3441                         goto again;
3442                 }
3443
3444                 return -ENOSPC;
3445         }
3446         data_sinfo->bytes_may_use += bytes;
3447         trace_btrfs_space_reservation(root->fs_info, "space_info",
3448                                       data_sinfo->flags, bytes, 1);
3449         spin_unlock(&data_sinfo->lock);
3450
3451         return 0;
3452 }
3453
3454 /*
3455  * Called if we need to clear a data reservation for this inode.
3456  */
3457 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3458 {
3459         struct btrfs_root *root = BTRFS_I(inode)->root;
3460         struct btrfs_space_info *data_sinfo;
3461
3462         /* make sure bytes are sectorsize aligned */
3463         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3464
3465         data_sinfo = root->fs_info->data_sinfo;
3466         spin_lock(&data_sinfo->lock);
3467         data_sinfo->bytes_may_use -= bytes;
3468         trace_btrfs_space_reservation(root->fs_info, "space_info",
3469                                       data_sinfo->flags, bytes, 0);
3470         spin_unlock(&data_sinfo->lock);
3471 }
3472
3473 static void force_metadata_allocation(struct btrfs_fs_info *info)
3474 {
3475         struct list_head *head = &info->space_info;
3476         struct btrfs_space_info *found;
3477
3478         rcu_read_lock();
3479         list_for_each_entry_rcu(found, head, list) {
3480                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3481                         found->force_alloc = CHUNK_ALLOC_FORCE;
3482         }
3483         rcu_read_unlock();
3484 }
3485
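/*
 * Heuristic for whether allocating a chunk is worthwhile.
 * CHUNK_ALLOC_FORCE always says yes.  CHUNK_ALLOC_LIMITED says yes while
 * free space in this space_info is below max(64MB, 1% of the fs size).
 * Otherwise a chunk is allocated once about 80% of the writable bytes
 * are used or reserved.
 */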
3486 static int should_alloc_chunk(struct btrfs_root *root,
3487                               struct btrfs_space_info *sinfo, int force)
3488 {
3489         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3490         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3491         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3492         u64 thresh;
3493
3494         if (force == CHUNK_ALLOC_FORCE)
3495                 return 1;
3496
3497         /*
3498          * We need to take into account the global rsv because for all intents
3499          * and purposes it's used space.  Don't worry about locking the
3500          * global_rsv, it doesn't change except when the transaction commits.
3501          */
3502         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3503                 num_allocated += global_rsv->size;
3504
3505         /*
3506          * in limited mode, we want to have some free space up to
3507          * about 1% of the FS size.
3508          */
3509         if (force == CHUNK_ALLOC_LIMITED) {
3510                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3511                 thresh = max_t(u64, 64 * 1024 * 1024,
3512                                div_factor_fine(thresh, 1));
3513
3514                 if (num_bytes - num_allocated < thresh)
3515                         return 1;
3516         }
3517
3518         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3519                 return 0;
3520         return 1;
3521 }
3522
3523 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3524 {
3525         u64 num_dev;
3526
3527         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3528                     BTRFS_BLOCK_GROUP_RAID0 |
3529                     BTRFS_BLOCK_GROUP_RAID5 |
3530                     BTRFS_BLOCK_GROUP_RAID6))
3531                 num_dev = root->fs_info->fs_devices->rw_devices;
3532         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3533                 num_dev = 2;
3534         else
3535                 num_dev = 1;    /* DUP or single */
3536
3537         /* metadata for updating the device items and chunk tree */
3538         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3539 }
3540
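/*
 * Allocating a chunk dirties the device items and the chunk tree, which
 * consume SYSTEM space themselves.  If the SYSTEM space_info has less
 * free space than that update is expected to need, allocate a new SYSTEM
 * chunk up front.
 */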
3541 static void check_system_chunk(struct btrfs_trans_handle *trans,
3542                                struct btrfs_root *root, u64 type)
3543 {
3544         struct btrfs_space_info *info;
3545         u64 left;
3546         u64 thresh;
3547
3548         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3549         spin_lock(&info->lock);
3550         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3551                 info->bytes_reserved - info->bytes_readonly;
3552         spin_unlock(&info->lock);
3553
3554         thresh = get_system_chunk_thresh(root, type);
3555         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3556                 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3557                        left, thresh, type);
3558                 dump_space_info(info, 0, 0);
3559         }
3560
3561         if (left < thresh) {
3562                 u64 flags;
3563
3564                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3565                 btrfs_alloc_chunk(trans, root, flags);
3566         }
3567 }
3568
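/*
 * Allocate a new chunk of type @flags if should_alloc_chunk() agrees,
 * serializing against concurrent allocators via chunk_mutex.  Returns 1
 * if a chunk was allocated, 0 if none was needed, and a negative errno
 * (including -ENOSPC, which also marks the space_info full) on failure.
 */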
3569 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3570                           struct btrfs_root *extent_root, u64 flags, int force)
3571 {
3572         struct btrfs_space_info *space_info;
3573         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3574         int wait_for_alloc = 0;
3575         int ret = 0;
3576
3577         space_info = __find_space_info(extent_root->fs_info, flags);
3578         if (!space_info) {
3579                 ret = update_space_info(extent_root->fs_info, flags,
3580                                         0, 0, &space_info);
3581                 BUG_ON(ret); /* -ENOMEM */
3582         }
3583         BUG_ON(!space_info); /* Logic error */
3584
3585 again:
3586         spin_lock(&space_info->lock);
3587         if (force < space_info->force_alloc)
3588                 force = space_info->force_alloc;
3589         if (space_info->full) {
3590                 spin_unlock(&space_info->lock);
3591                 return 0;
3592         }
3593
3594         if (!should_alloc_chunk(extent_root, space_info, force)) {
3595                 spin_unlock(&space_info->lock);
3596                 return 0;
3597         } else if (space_info->chunk_alloc) {
3598                 wait_for_alloc = 1;
3599         } else {
3600                 space_info->chunk_alloc = 1;
3601         }
3602
3603         spin_unlock(&space_info->lock);
3604
3605         mutex_lock(&fs_info->chunk_mutex);
3606
3607         /*
3608          * The chunk_mutex is held throughout the entirety of a chunk
3609          * allocation, so once we've acquired the chunk_mutex we know that the
3610          * other guy is done and we need to recheck and see if we should
3611          * allocate.
3612          */
3613         if (wait_for_alloc) {
3614                 mutex_unlock(&fs_info->chunk_mutex);
3615                 wait_for_alloc = 0;
3616                 goto again;
3617         }
3618
3619         /*
3620          * If we have mixed data/metadata chunks we want to make sure we keep
3621          * allocating mixed chunks instead of individual chunks.
3622          */
3623         if (btrfs_mixed_space_info(space_info))
3624                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3625
3626         /*
3627          * if we're doing a data chunk, go ahead and make sure that
3628          * we keep a reasonable number of metadata chunks allocated in the
3629          * FS as well.
3630          */
3631         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3632                 fs_info->data_chunk_allocations++;
3633                 if (!(fs_info->data_chunk_allocations %
3634                       fs_info->metadata_ratio))
3635                         force_metadata_allocation(fs_info);
3636         }
3637
3638         /*
3639          * Check if we have enough space in SYSTEM chunk because we may need
3640          * to update devices.
3641          */
3642         check_system_chunk(trans, extent_root, flags);
3643
3644         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3645         if (ret < 0 && ret != -ENOSPC)
3646                 goto out;
3647
3648         spin_lock(&space_info->lock);
3649         if (ret)
3650                 space_info->full = 1;
3651         else
3652                 ret = 1;
3653
3654         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3655         space_info->chunk_alloc = 0;
3656         spin_unlock(&space_info->lock);
3657 out:
3658         mutex_unlock(&fs_info->chunk_mutex);
3659         return ret;
3660 }
3661
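/*
 * Decide whether a reservation of @bytes may go beyond total_bytes for
 * this space_info.  Unallocated device space is counted as headroom,
 * halved for mirrored profiles, and then scaled by how hard we may
 * flush: only 1/8 of it when a full flush can reclaim space anyway,
 * 1/2 otherwise.
 */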
3662 static int can_overcommit(struct btrfs_root *root,
3663                           struct btrfs_space_info *space_info, u64 bytes,
3664                           enum btrfs_reserve_flush_enum flush)
3665 {
3666         u64 profile = btrfs_get_alloc_profile(root, 0);
3667         u64 avail;
3668         u64 used;
3669
3670         used = space_info->bytes_used + space_info->bytes_reserved +
3671                 space_info->bytes_pinned + space_info->bytes_readonly +
3672                 space_info->bytes_may_use;
3673
3674         spin_lock(&root->fs_info->free_chunk_lock);
3675         avail = root->fs_info->free_chunk_space;
3676         spin_unlock(&root->fs_info->free_chunk_lock);
3677
3678         /*
3679          * If we have dup, raid1 or raid10 then only half of the free
3680          * space is actually useable.  For raid56, the space info used
3681          * doesn't include the parity drive, so we don't have to
3682          * change the math
3683          */
3684         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3685                        BTRFS_BLOCK_GROUP_RAID1 |
3686                        BTRFS_BLOCK_GROUP_RAID10))
3687                 avail >>= 1;
3688
3689         /*
3690          * If we aren't allowed to flush all things, let us overcommit up to
3691          * 1/2 of the space.  If we can flush, don't let us overcommit too
3692          * much; let it overcommit up to 1/8 of the space.
3693          */
3694         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3695                 avail >>= 3;
3696         else
3697                 avail >>= 1;
3698
3699         if (used + bytes < space_info->total_bytes + avail)
3700                 return 1;
3701         return 0;
3702 }
3703
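/*
 * Kick off writeback only if none is already in progress and s_umount
 * can be taken without blocking, so this stays safe to call from the
 * reservation paths.
 */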
3704 static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
3705                                                unsigned long nr_pages,
3706                                                enum wb_reason reason)
3707 {
3708         if (!writeback_in_progress(sb->s_bdi) &&
3709             down_read_trylock(&sb->s_umount)) {
3710                 writeback_inodes_sb_nr(sb, nr_pages, reason);
3711                 up_read(&sb->s_umount);
3712                 return 1;
3713         }
3714
3715         return 0;
3716 }
3717
3718 /*
3719  * shrink metadata reservation for delalloc
3720  */
3721 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3722                             bool wait_ordered)
3723 {
3724         struct btrfs_block_rsv *block_rsv;
3725         struct btrfs_space_info *space_info;
3726         struct btrfs_trans_handle *trans;
3727         u64 delalloc_bytes;
3728         u64 max_reclaim;
3729         long time_left;
3730         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3731         int loops = 0;
3732         enum btrfs_reserve_flush_enum flush;
3733
3734         trans = (struct btrfs_trans_handle *)current->journal_info;
3735         block_rsv = &root->fs_info->delalloc_block_rsv;
3736         space_info = block_rsv->space_info;
3737
3738         smp_mb();
3739         delalloc_bytes = root->fs_info->delalloc_bytes;
3740         if (delalloc_bytes == 0) {
3741                 if (trans)
3742                         return;
3743                 btrfs_wait_ordered_extents(root, 0);
3744                 return;
3745         }
3746
3747         while (delalloc_bytes && loops < 3) {
3748                 max_reclaim = min(delalloc_bytes, to_reclaim);
3749                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3750                 writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
3751                                                     nr_pages,
3752                                                     WB_REASON_FS_FREE_SPACE);
3753
3754                 /*
3755                  * We need to wait for the async pages to actually start before
3756                  * we do anything.
3757                  */
3758                 wait_event(root->fs_info->async_submit_wait,
3759                            !atomic_read(&root->fs_info->async_delalloc_pages));
3760
3761                 if (!trans)
3762                         flush = BTRFS_RESERVE_FLUSH_ALL;
3763                 else
3764                         flush = BTRFS_RESERVE_NO_FLUSH;
3765                 spin_lock(&space_info->lock);
3766                 if (can_overcommit(root, space_info, orig, flush)) {
3767                         spin_unlock(&space_info->lock);
3768                         break;
3769                 }
3770                 spin_unlock(&space_info->lock);
3771
3772                 loops++;
3773                 if (wait_ordered && !trans) {
3774                         btrfs_wait_ordered_extents(root, 0);
3775                 } else {
3776                         time_left = schedule_timeout_killable(1);
3777                         if (time_left)
3778                                 break;
3779                 }
3780                 smp_mb();
3781                 delalloc_bytes = root->fs_info->delalloc_bytes;
3782         }
3783 }
3784
3785 /**
3786  * may_commit_transaction - possibly commit the transaction if it's ok to
3787  * @root - the root we're allocating for
3788  * @bytes - the number of bytes we want to reserve
3789  * @force - force the commit
3790  *
3791  * This will check to make sure that committing the transaction will actually
3792  * get us somewhere and then commit the transaction if it does.  Otherwise it
3793  * will return -ENOSPC.
3794  */
3795 static int may_commit_transaction(struct btrfs_root *root,
3796                                   struct btrfs_space_info *space_info,
3797                                   u64 bytes, int force)
3798 {
3799         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3800         struct btrfs_trans_handle *trans;
3801
3802         trans = (struct btrfs_trans_handle *)current->journal_info;
3803         if (trans)
3804                 return -EAGAIN;
3805
3806         if (force)
3807                 goto commit;
3808
3809         /* See if there is enough pinned space to make this reservation */
3810         spin_lock(&space_info->lock);
3811         if (space_info->bytes_pinned >= bytes) {
3812                 spin_unlock(&space_info->lock);
3813                 goto commit;
3814         }
3815         spin_unlock(&space_info->lock);
3816
3817         /*
3818          * See if there is some space in the delayed insertion reservation for
3819          * this reservation.
3820          */
3821         if (space_info != delayed_rsv->space_info)
3822                 return -ENOSPC;
3823
3824         spin_lock(&space_info->lock);
3825         spin_lock(&delayed_rsv->lock);
3826         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3827                 spin_unlock(&delayed_rsv->lock);
3828                 spin_unlock(&space_info->lock);
3829                 return -ENOSPC;
3830         }
3831         spin_unlock(&delayed_rsv->lock);
3832         spin_unlock(&space_info->lock);
3833
3834 commit:
3835         trans = btrfs_join_transaction(root);
3836         if (IS_ERR(trans))
3837                 return -ENOSPC;
3838
3839         return btrfs_commit_transaction(trans, root);
3840 }
3841
3842 enum flush_state {
3843         FLUSH_DELAYED_ITEMS_NR  =       1,
3844         FLUSH_DELAYED_ITEMS     =       2,
3845         FLUSH_DELALLOC          =       3,
3846         FLUSH_DELALLOC_WAIT     =       4,
3847         ALLOC_CHUNK             =       5,
3848         COMMIT_TRANS            =       6,
3849 };
3850
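/*
 * Run one state of the flush ladder used by reserve_metadata_bytes().
 * The states escalate from running a batch of delayed items, through
 * flushing (and then waiting on) delalloc and allocating a new chunk,
 * to committing the transaction as the last resort.
 */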
3851 static int flush_space(struct btrfs_root *root,
3852                        struct btrfs_space_info *space_info, u64 num_bytes,
3853                        u64 orig_bytes, int state)
3854 {
3855         struct btrfs_trans_handle *trans;
3856         int nr;
3857         int ret = 0;
3858
3859         switch (state) {
3860         case FLUSH_DELAYED_ITEMS_NR:
3861         case FLUSH_DELAYED_ITEMS:
3862                 if (state == FLUSH_DELAYED_ITEMS_NR) {
3863                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3864
3865                         nr = (int)div64_u64(num_bytes, bytes);
3866                         if (!nr)
3867                                 nr = 1;
3868                         nr *= 2;
3869                 } else {
3870                         nr = -1;
3871                 }
3872                 trans = btrfs_join_transaction(root);
3873                 if (IS_ERR(trans)) {
3874                         ret = PTR_ERR(trans);
3875                         break;
3876                 }
3877                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
3878                 btrfs_end_transaction(trans, root);
3879                 break;
3880         case FLUSH_DELALLOC:
3881         case FLUSH_DELALLOC_WAIT:
3882                 shrink_delalloc(root, num_bytes, orig_bytes,
3883                                 state == FLUSH_DELALLOC_WAIT);
3884                 break;
3885         case ALLOC_CHUNK:
3886                 trans = btrfs_join_transaction(root);
3887                 if (IS_ERR(trans)) {
3888                         ret = PTR_ERR(trans);
3889                         break;
3890                 }
3891                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3892                                      btrfs_get_alloc_profile(root, 0),
3893                                      CHUNK_ALLOC_NO_FORCE);
3894                 btrfs_end_transaction(trans, root);
3895                 if (ret == -ENOSPC)
3896                         ret = 0;
3897                 break;
3898         case COMMIT_TRANS:
3899                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3900                 break;
3901         default:
3902                 ret = -ENOSPC;
3903                 break;
3904         }
3905
3906         return ret;
3907 }
3908 /**
3909  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3910  * @root - the root we're allocating for
3911  * @block_rsv - the block_rsv we're allocating for
3912  * @orig_bytes - the number of bytes we want
3913  * @flush - whether or not we can flush to make our reservation
3914  *
3915  * This will reserve orig_bytes number of bytes from the space info
3916  * associated with the block_rsv.  If there is not enough space it will
3917  * make an attempt to flush out space to make room, by flushing delalloc
3918  * if possible or by committing the transaction.  If flush is
3919  * BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations will be
3920  * made and this will fail if there is not enough space already.
3921  */
3922 static int reserve_metadata_bytes(struct btrfs_root *root,
3923                                   struct btrfs_block_rsv *block_rsv,
3924                                   u64 orig_bytes,
3925                                   enum btrfs_reserve_flush_enum flush)
3926 {
3927         struct btrfs_space_info *space_info = block_rsv->space_info;
3928         u64 used;
3929         u64 num_bytes = orig_bytes;
3930         int flush_state = FLUSH_DELAYED_ITEMS_NR;
3931         int ret = 0;
3932         bool flushing = false;
3933
3934 again:
3935         ret = 0;
3936         spin_lock(&space_info->lock);
3937         /*
3938          * We only want to wait if somebody other than us is flushing and we
3939          * are actually allowed to flush all things.
3940          */
3941         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
3942                space_info->flush) {
3943                 spin_unlock(&space_info->lock);
3944                 /*
3945                  * If we have a trans handle we can't wait because the flusher
3946                  * may have to commit the transaction, which would mean we would
3947                  * deadlock since we are waiting for the flusher to finish, but
3948                  * hold the current transaction open.
3949                  */
3950                 if (current->journal_info)
3951                         return -EAGAIN;
3952                 ret = wait_event_killable(space_info->wait, !space_info->flush);
3953                 /* Must have been killed, return */
3954                 if (ret)
3955                         return -EINTR;
3956
3957                 spin_lock(&space_info->lock);
3958         }
3959
3960         ret = -ENOSPC;
3961         used = space_info->bytes_used + space_info->bytes_reserved +
3962                 space_info->bytes_pinned + space_info->bytes_readonly +
3963                 space_info->bytes_may_use;
3964
3965         /*
3966          * The idea here is that if we've not already over-reserved the space
3967          * then we can go ahead and save our reservation first and then start
3968          * flushing if we need to.  Otherwise, if we've already overcommitted,
3969          * let's start flushing stuff first and then come back and try to make
3970          * our reservation.
3971          */
3972         if (used <= space_info->total_bytes) {
3973                 if (used + orig_bytes <= space_info->total_bytes) {
3974                         space_info->bytes_may_use += orig_bytes;
3975                         trace_btrfs_space_reservation(root->fs_info,
3976                                 "space_info", space_info->flags, orig_bytes, 1);
3977                         ret = 0;
3978                 } else {
3979                         /*
3980                          * Ok, set num_bytes to orig_bytes since we aren't
3981                          * overcommitted; this way we only try to reclaim what
3982                          * we need.
3983                          */
3984                         num_bytes = orig_bytes;
3985                 }
3986         } else {
3987                 /*
3988                  * Ok we're over committed, set num_bytes to the overcommitted
3989                  * amount plus the amount of bytes that we need for this
3990                  * reservation.
3991                  */
3992                 num_bytes = used - space_info->total_bytes +
3993                         (orig_bytes * 2);
3994         }
3995
3996         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
3997                 space_info->bytes_may_use += orig_bytes;
3998                 trace_btrfs_space_reservation(root->fs_info, "space_info",
3999                                               space_info->flags, orig_bytes,
4000                                               1);
4001                 ret = 0;
4002         }
4003
4004         /*
4005          * Couldn't make our reservation, save our place so while we're trying
4006          * to reclaim space we can actually use it instead of somebody else
4007          * stealing it from us.
4008          *
4009          * We make the other tasks wait for the flush only when we can flush
4010          * all things.
4011          */
4012         if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) {
4013                 flushing = true;
4014                 space_info->flush = 1;
4015         }
4016
4017         spin_unlock(&space_info->lock);
4018
4019         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4020                 goto out;
4021
4022         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4023                           flush_state);
4024         flush_state++;
4025
4026         /*
4027          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4028          * would happen.  So skip the delalloc flush.
4029          */
4030         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4031             (flush_state == FLUSH_DELALLOC ||
4032              flush_state == FLUSH_DELALLOC_WAIT))
4033                 flush_state = ALLOC_CHUNK;
4034
4035         if (!ret)
4036                 goto again;
4037         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4038                  flush_state < COMMIT_TRANS)
4039                 goto again;
4040         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4041                  flush_state <= COMMIT_TRANS)
4042                 goto again;
4043
4044 out:
4045         if (flushing) {
4046                 spin_lock(&space_info->lock);
4047                 space_info->flush = 0;
4048                 wake_up_all(&space_info->wait);
4049                 spin_unlock(&space_info->lock);
4050         }
4051         return ret;
4052 }
4053
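/*
 * Pick the reservation a root should draw from: the transaction's rsv
 * for COW-able roots and for csum insertion, then the root's own rsv,
 * and finally the fs_info empty_block_rsv as a fallback.
 */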
4054 static struct btrfs_block_rsv *get_block_rsv(
4055                                         const struct btrfs_trans_handle *trans,
4056                                         const struct btrfs_root *root)
4057 {
4058         struct btrfs_block_rsv *block_rsv = NULL;
4059
4060         if (root->ref_cows)
4061                 block_rsv = trans->block_rsv;
4062
4063         if (root == root->fs_info->csum_root && trans->adding_csums)
4064                 block_rsv = trans->block_rsv;
4065
4066         if (!block_rsv)
4067                 block_rsv = root->block_rsv;
4068
4069         if (!block_rsv)
4070                 block_rsv = &root->fs_info->empty_block_rsv;
4071
4072         return block_rsv;
4073 }
4074
4075 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4076                                u64 num_bytes)
4077 {
4078         int ret = -ENOSPC;
4079         spin_lock(&block_rsv->lock);
4080         if (block_rsv->reserved >= num_bytes) {
4081                 block_rsv->reserved -= num_bytes;
4082                 if (block_rsv->reserved < block_rsv->size)
4083                         block_rsv->full = 0;
4084                 ret = 0;
4085         }
4086         spin_unlock(&block_rsv->lock);
4087         return ret;
4088 }
4089
4090 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4091                                 u64 num_bytes, int update_size)
4092 {
4093         spin_lock(&block_rsv->lock);
4094         block_rsv->reserved += num_bytes;
4095         if (update_size)
4096                 block_rsv->size += num_bytes;
4097         else if (block_rsv->reserved >= block_rsv->size)
4098                 block_rsv->full = 1;
4099         spin_unlock(&block_rsv->lock);
4100 }
4101
4102 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4103                                     struct btrfs_block_rsv *block_rsv,
4104                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4105 {
4106         struct btrfs_space_info *space_info = block_rsv->space_info;
4107
4108         spin_lock(&block_rsv->lock);
4109         if (num_bytes == (u64)-1)
4110                 num_bytes = block_rsv->size;
4111         block_rsv->size -= num_bytes;
4112         if (block_rsv->reserved >= block_rsv->size) {
4113                 num_bytes = block_rsv->reserved - block_rsv->size;
4114                 block_rsv->reserved = block_rsv->size;
4115                 block_rsv->full = 1;
4116         } else {
4117                 num_bytes = 0;
4118         }
4119         spin_unlock(&block_rsv->lock);
4120
4121         if (num_bytes > 0) {
4122                 if (dest) {
4123                         spin_lock(&dest->lock);
4124                         if (!dest->full) {
4125                                 u64 bytes_to_add;
4126
4127                                 bytes_to_add = dest->size - dest->reserved;
4128                                 bytes_to_add = min(num_bytes, bytes_to_add);
4129                                 dest->reserved += bytes_to_add;
4130                                 if (dest->reserved >= dest->size)
4131                                         dest->full = 1;
4132                                 num_bytes -= bytes_to_add;
4133                         }
4134                         spin_unlock(&dest->lock);
4135                 }
4136                 if (num_bytes) {
4137                         spin_lock(&space_info->lock);
4138                         space_info->bytes_may_use -= num_bytes;
4139                         trace_btrfs_space_reservation(fs_info, "space_info",
4140                                         space_info->flags, num_bytes, 0);
4141                         space_info->reservation_progress++;
4142                         spin_unlock(&space_info->lock);
4143                 }
4144         }
4145 }
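
/*
 * Editor's note: a worked example of the release path above, with made-up
 * numbers.  Suppose block_rsv has size = 8M and reserved = 10M, and the
 * caller releases num_bytes = (u64)-1:
 *
 *   size     -> 8M - 8M = 0            (num_bytes was clamped to size)
 *   reserved -> 10M >= 0, so the 10M excess is carved off and reserved
 *               is set to the new size (0), leaving 10M to hand out
 *
 * Those 10M first top up @dest (if given and not full), and whatever is
 * left comes out of space_info->bytes_may_use, i.e. it stops being
 * "may use" accounting and becomes genuinely free space.
 */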
4146
4147 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4148                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4149 {
4150         int ret;
4151
4152         ret = block_rsv_use_bytes(src, num_bytes);
4153         if (ret)
4154                 return ret;
4155
4156         block_rsv_add_bytes(dst, num_bytes, 1);
4157         return 0;
4158 }
4159
4160 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4161 {
4162         memset(rsv, 0, sizeof(*rsv));
4163         spin_lock_init(&rsv->lock);
4164         rsv->type = type;
4165 }
4166
4167 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4168                                               unsigned short type)
4169 {
4170         struct btrfs_block_rsv *block_rsv;
4171         struct btrfs_fs_info *fs_info = root->fs_info;
4172
4173         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4174         if (!block_rsv)
4175                 return NULL;
4176
4177         btrfs_init_block_rsv(block_rsv, type);
4178         block_rsv->space_info = __find_space_info(fs_info,
4179                                                   BTRFS_BLOCK_GROUP_METADATA);
4180         return block_rsv;
4181 }
4182
4183 void btrfs_free_block_rsv(struct btrfs_root *root,
4184                           struct btrfs_block_rsv *rsv)
4185 {
4186         if (!rsv)
4187                 return;
4188         btrfs_block_rsv_release(root, rsv, (u64)-1);
4189         kfree(rsv);
4190 }
4191
4192 int btrfs_block_rsv_add(struct btrfs_root *root,
4193                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4194                         enum btrfs_reserve_flush_enum flush)
4195 {
4196         int ret;
4197
4198         if (num_bytes == 0)
4199                 return 0;
4200
4201         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4202         if (!ret) {
4203                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4204                 return 0;
4205         }
4206
4207         return ret;
4208 }
4209
4210 int btrfs_block_rsv_check(struct btrfs_root *root,
4211                           struct btrfs_block_rsv *block_rsv, int min_factor)
4212 {
4213         u64 num_bytes = 0;
4214         int ret = -ENOSPC;
4215
4216         if (!block_rsv)
4217                 return 0;
4218
4219         spin_lock(&block_rsv->lock);
4220         num_bytes = div_factor(block_rsv->size, min_factor);
4221         if (block_rsv->reserved >= num_bytes)
4222                 ret = 0;
4223         spin_unlock(&block_rsv->lock);
4224
4225         return ret;
4226 }
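
/*
 * Editor's note: min_factor above is in tenths, assuming the div_factor()
 * helper used here behaves as (num * factor / 10).  For example, with
 * block_rsv->size = 10M and min_factor = 8, the check passes only while
 * at least 80% is still reserved:
 *
 *   num_bytes = div_factor(10M, 8) = 8M
 *   reserved >= 8M  ->  ret = 0
 *   reserved <  8M  ->  ret = -ENOSPC
 */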
4227
4228 int btrfs_block_rsv_refill(struct btrfs_root *root,
4229                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4230                            enum btrfs_reserve_flush_enum flush)
4231 {
4232         u64 num_bytes = 0;
4233         int ret = -ENOSPC;
4234
4235         if (!block_rsv)
4236                 return 0;
4237
4238         spin_lock(&block_rsv->lock);
4239         num_bytes = min_reserved;
4240         if (block_rsv->reserved >= num_bytes)
4241                 ret = 0;
4242         else
4243                 num_bytes -= block_rsv->reserved;
4244         spin_unlock(&block_rsv->lock);
4245
4246         if (!ret)
4247                 return 0;
4248
4249         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4250         if (!ret) {
4251                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4252                 return 0;
4253         }
4254
4255         return ret;
4256 }
4257
4258 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4259                             struct btrfs_block_rsv *dst_rsv,
4260                             u64 num_bytes)
4261 {
4262         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4263 }
4264
4265 void btrfs_block_rsv_release(struct btrfs_root *root,
4266                              struct btrfs_block_rsv *block_rsv,
4267                              u64 num_bytes)
4268 {
4269         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4270         if (global_rsv->full || global_rsv == block_rsv ||
4271             block_rsv->space_info != global_rsv->space_info)
4272                 global_rsv = NULL;
4273         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4274                                 num_bytes);
4275 }
4276
4277 /*
4278  * helper to calculate size of global block reservation.
4279  * the desired value is the sum of the space used by the extent tree,
4280  * the checksum tree and the root tree.
4281  */
4282 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4283 {
4284         struct btrfs_space_info *sinfo;
4285         u64 num_bytes;
4286         u64 meta_used;
4287         u64 data_used;
4288         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4289
4290         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4291         spin_lock(&sinfo->lock);
4292         data_used = sinfo->bytes_used;
4293         spin_unlock(&sinfo->lock);
4294
4295         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4296         spin_lock(&sinfo->lock);
4297         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4298                 data_used = 0;
4299         meta_used = sinfo->bytes_used;
4300         spin_unlock(&sinfo->lock);
4301
4302         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4303                     csum_size * 2;
4304         num_bytes += div64_u64(data_used + meta_used, 50);
4305
4306         if (num_bytes * 3 > meta_used)
4307                 num_bytes = div64_u64(meta_used, 3);
4308
4309         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4310 }
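
/*
 * Editor's note: a worked example of the sizing heuristic above, with
 * illustrative numbers (4K blocks, 4-byte crc32c csums):
 *
 *   data_used = 100G, meta_used = 4G
 *   csum part:  (100G >> 12) * 4 * 2             ~= 200M
 *   2% part:    (100G + 4G) / 50                 ~= 2.08G
 *   subtotal:                                    ~= 2.28G
 *   cap:        3 * 2.28G > 4G, so clamp to meta_used / 3 ~= 1.33G
 *
 * The result is then rounded up to a multiple of leafsize << 10.
 */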
4311
4312 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4313 {
4314         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4315         struct btrfs_space_info *sinfo = block_rsv->space_info;
4316         u64 num_bytes;
4317
4318         num_bytes = calc_global_metadata_size(fs_info);
4319
4320         spin_lock(&sinfo->lock);
4321         spin_lock(&block_rsv->lock);
4322
4323         block_rsv->size = num_bytes;
4324
4325         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4326                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4327                     sinfo->bytes_may_use;
4328
4329         if (sinfo->total_bytes > num_bytes) {
4330                 num_bytes = sinfo->total_bytes - num_bytes;
4331                 block_rsv->reserved += num_bytes;
4332                 sinfo->bytes_may_use += num_bytes;
4333                 trace_btrfs_space_reservation(fs_info, "space_info",
4334                                       sinfo->flags, num_bytes, 1);
4335         }
4336
4337         if (block_rsv->reserved >= block_rsv->size) {
4338                 num_bytes = block_rsv->reserved - block_rsv->size;
4339                 sinfo->bytes_may_use -= num_bytes;
4340                 trace_btrfs_space_reservation(fs_info, "space_info",
4341                                       sinfo->flags, num_bytes, 0);
4342                 sinfo->reservation_progress++;
4343                 block_rsv->reserved = block_rsv->size;
4344                 block_rsv->full = 1;
4345         }
4346
4347         spin_unlock(&block_rsv->lock);
4348         spin_unlock(&sinfo->lock);
4349 }
4350
4351 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4352 {
4353         struct btrfs_space_info *space_info;
4354
4355         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4356         fs_info->chunk_block_rsv.space_info = space_info;
4357
4358         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4359         fs_info->global_block_rsv.space_info = space_info;
4360         fs_info->delalloc_block_rsv.space_info = space_info;
4361         fs_info->trans_block_rsv.space_info = space_info;
4362         fs_info->empty_block_rsv.space_info = space_info;
4363         fs_info->delayed_block_rsv.space_info = space_info;
4364
4365         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4366         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4367         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4368         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4369         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4370
4371         update_global_block_rsv(fs_info);
4372 }
4373
4374 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4375 {
4376         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4377                                 (u64)-1);
4378         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4379         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4380         WARN_ON(fs_info->trans_block_rsv.size > 0);
4381         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4382         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4383         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4384         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4385         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4386 }
4387
4388 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4389                                   struct btrfs_root *root)
4390 {
4391         if (!trans->block_rsv)
4392                 return;
4393
4394         if (!trans->bytes_reserved)
4395                 return;
4396
4397         trace_btrfs_space_reservation(root->fs_info, "transaction",
4398                                       trans->transid, trans->bytes_reserved, 0);
4399         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4400         trans->bytes_reserved = 0;
4401 }
4402
4403 /* Can only return 0 or -ENOSPC */
4404 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4405                                   struct inode *inode)
4406 {
4407         struct btrfs_root *root = BTRFS_I(inode)->root;
4408         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4409         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4410
4411         /*
4412          * We need to hold space in order to delete our orphan item once we've
4413          * added it, so this takes the reservation now so that we can release
4414          * it later, when we are truly done with the orphan item.
4415          */
4416         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4417         trace_btrfs_space_reservation(root->fs_info, "orphan",
4418                                       btrfs_ino(inode), num_bytes, 1);
4419         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4420 }
4421
4422 void btrfs_orphan_release_metadata(struct inode *inode)
4423 {
4424         struct btrfs_root *root = BTRFS_I(inode)->root;
4425         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4426         trace_btrfs_space_reservation(root->fs_info, "orphan",
4427                                       btrfs_ino(inode), num_bytes, 0);
4428         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4429 }
4430
4431 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4432                                 struct btrfs_pending_snapshot *pending)
4433 {
4434         struct btrfs_root *root = pending->root;
4435         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4436         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4437         /*
4438          * two for root back/forward refs, two for directory entries,
4439          * one for the root of the snapshot and one for the parent inode.
4440          */
4441         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
4442         dst_rsv->space_info = src_rsv->space_info;
4443         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4444 }
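
/*
 * Editor's note: for scale, btrfs_calc_trans_metadata_size(root, 6) in this
 * era expands to roughly (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3
 * per item (one full tree path, times three for potential splits).  With 4K
 * nodes and BTRFS_MAX_LEVEL = 8 that is 32K * 3 = 96K per item, so the six
 * items reserved above come to about 576K of metadata space.
 */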
4445
4446 /**
4447  * drop_outstanding_extent - drop an outstanding extent
4448  * @inode: the inode we're dropping the extent for
4449  *
4450  * This is called when we are freeing up an outstanding extent, either
4451  * after an error or after an extent is written.  This will return the number of
4452  * reserved extents that need to be freed.  This must be called with
4453  * BTRFS_I(inode)->lock held.
4454  */
4455 static unsigned drop_outstanding_extent(struct inode *inode)
4456 {
4457         unsigned drop_inode_space = 0;
4458         unsigned dropped_extents = 0;
4459
4460         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4461         BTRFS_I(inode)->outstanding_extents--;
4462
4463         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4464             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4465                                &BTRFS_I(inode)->runtime_flags))
4466                 drop_inode_space = 1;
4467
4468         /*
4469          * If we have at least as many outstanding extents as reserved
4470          * extents, then we need to leave the reserved extents count alone.
4471          */
4472         if (BTRFS_I(inode)->outstanding_extents >=
4473             BTRFS_I(inode)->reserved_extents)
4474                 return drop_inode_space;
4475
4476         dropped_extents = BTRFS_I(inode)->reserved_extents -
4477                 BTRFS_I(inode)->outstanding_extents;
4478         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4479         return dropped_extents + drop_inode_space;
4480 }
4481
4482 /**
4483  * calc_csum_metadata_size - return the amount of metadata space that must
4484  *      be reserved/freed for the given bytes.
4485  * @inode: the inode we're manipulating
4486  * @num_bytes: the number of bytes in question
4487  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4488  *
4489  * This adjusts the number of csum_bytes in the inode and then returns the
4490  * correct amount of metadata that must either be reserved or freed.  We
4491  * calculate how many checksums we can fit into one leaf and then divide the
4492  * number of bytes that will need to be checksumed by this value to figure out
4493  * how many checksums will be required.  If we are adding bytes then the number
4494  * may go up and we will return the number of additional bytes that must be
4495  * reserved.  If it is going down we will return the number of bytes that must
4496  * be freed.
4497  *
4498  * This must be called with BTRFS_I(inode)->lock held.
4499  */
4500 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4501                                    int reserve)
4502 {
4503         struct btrfs_root *root = BTRFS_I(inode)->root;
4504         u64 csum_size;
4505         int num_csums_per_leaf;
4506         int num_csums;
4507         int old_csums;
4508
4509         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4510             BTRFS_I(inode)->csum_bytes == 0)
4511                 return 0;
4512
4513         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4514         if (reserve)
4515                 BTRFS_I(inode)->csum_bytes += num_bytes;
4516         else
4517                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4518         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4519         num_csums_per_leaf = (int)div64_u64(csum_size,
4520                                             sizeof(struct btrfs_csum_item) +
4521                                             sizeof(struct btrfs_disk_key));
4522         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4523         num_csums = num_csums + num_csums_per_leaf - 1;
4524         num_csums = num_csums / num_csums_per_leaf;
4525
4526         old_csums = old_csums + num_csums_per_leaf - 1;
4527         old_csums = old_csums / num_csums_per_leaf;
4528
4529         /* No change, no need to reserve more */
4530         if (old_csums == num_csums)
4531                 return 0;
4532
4533         if (reserve)
4534                 return btrfs_calc_trans_metadata_size(root,
4535                                                       num_csums - old_csums);
4536
4537         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4538 }
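
/*
 * Editor's note: a worked example of the csum math above, with made-up
 * round numbers.  Suppose num_csums_per_leaf works out to 200 for this
 * leaf size, sectorsize is 4K, csum_bytes is currently 0, and we reserve
 * num_bytes = 1M:
 *
 *   old_csums = ceil(0 / 200)                    = 0 leaves
 *   csum_bytes -> 1M, so 1M / 4K                 = 256 csums
 *   num_csums = ceil(256 / 200)                  = 2 leaves
 *
 * old_csums != num_csums, so we reserve metadata for 2 - 0 = 2 items.
 */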
4539
4540 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4541 {
4542         struct btrfs_root *root = BTRFS_I(inode)->root;
4543         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4544         u64 to_reserve = 0;
4545         u64 csum_bytes;
4546         unsigned nr_extents = 0;
4547         int extra_reserve = 0;
4548         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4549         int ret;
4550         bool delalloc_lock = true;
4551
4552         /* If we are a free space inode, we must not flush, since we will be in
4553          * the middle of a transaction commit.  We also don't need the delalloc
4554          * mutex since we won't race with anybody.  We need this mostly to make
4555          * lockdep shut its filthy mouth.
4556          */
4557         if (btrfs_is_free_space_inode(inode)) {
4558                 flush = BTRFS_RESERVE_NO_FLUSH;
4559                 delalloc_lock = false;
4560         }
4561
4562         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4563             btrfs_transaction_in_commit(root->fs_info))
4564                 schedule_timeout(1);
4565
4566         if (delalloc_lock)
4567                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4568
4569         num_bytes = ALIGN(num_bytes, root->sectorsize);
4570
4571         spin_lock(&BTRFS_I(inode)->lock);
4572         BTRFS_I(inode)->outstanding_extents++;
4573
4574         if (BTRFS_I(inode)->outstanding_extents >
4575             BTRFS_I(inode)->reserved_extents)
4576                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4577                         BTRFS_I(inode)->reserved_extents;
4578
4579         /*
4580          * Add an item to reserve for updating the inode when we complete the
4581          * delalloc io.
4582          */
4583         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4584                       &BTRFS_I(inode)->runtime_flags)) {
4585                 nr_extents++;
4586                 extra_reserve = 1;
4587         }
4588
4589         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4590         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4591         csum_bytes = BTRFS_I(inode)->csum_bytes;
4592         spin_unlock(&BTRFS_I(inode)->lock);
4593
4594         if (root->fs_info->quota_enabled) {
4595                 ret = btrfs_qgroup_reserve(root, num_bytes +
4596                                            nr_extents * root->leafsize);
4597                 if (ret) {
4598                         spin_lock(&BTRFS_I(inode)->lock);
4599                         calc_csum_metadata_size(inode, num_bytes, 0);
4600                         spin_unlock(&BTRFS_I(inode)->lock);
4601                         if (delalloc_lock)
4602                                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4603                         return ret;
4604                 }
4605         }
4606
4607         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4608         if (ret) {
4609                 u64 to_free = 0;
4610                 unsigned dropped;
4611
4612                 spin_lock(&BTRFS_I(inode)->lock);
4613                 dropped = drop_outstanding_extent(inode);
4614                 /*
4615                  * If the inode's csum_bytes is the same as the original
4616                  * csum_bytes, then we know we haven't raced with any free()ers,
4617                  * so we can just reduce the inode's csum_bytes and carry on.
4618                  * Otherwise we have to do the normal free thing to account for
4619                  * the case that the free side didn't free up its reserve
4620                  * because of this outstanding reservation.
4621                  */
4622                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4623                         calc_csum_metadata_size(inode, num_bytes, 0);
4624                 else
4625                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4626                 spin_unlock(&BTRFS_I(inode)->lock);
4627                 if (dropped)
4628                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4629
4630                 if (to_free) {
4631                         btrfs_block_rsv_release(root, block_rsv, to_free);
4632                         trace_btrfs_space_reservation(root->fs_info,
4633                                                       "delalloc",
4634                                                       btrfs_ino(inode),
4635                                                       to_free, 0);
4636                 }
4637                 if (root->fs_info->quota_enabled) {
4638                         btrfs_qgroup_free(root, num_bytes +
4639                                                 nr_extents * root->leafsize);
4640                 }
4641                 if (delalloc_lock)
4642                         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4643                 return ret;
4644         }
4645
4646         spin_lock(&BTRFS_I(inode)->lock);
4647         if (extra_reserve) {
4648                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4649                         &BTRFS_I(inode)->runtime_flags);
4650                 nr_extents--;
4651         }
4652         BTRFS_I(inode)->reserved_extents += nr_extents;
4653         spin_unlock(&BTRFS_I(inode)->lock);
4654
4655         if (delalloc_lock)
4656                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4657
4658         if (to_reserve)
4659                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4660                                               btrfs_ino(inode), to_reserve, 1);
4661         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4662
4663         return 0;
4664 }
4665
4666 /**
4667  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4668  * @inode: the inode to release the reservation for
4669  * @num_bytes: the number of bytes we're releasing
4670  *
4671  * This will release the metadata reservation for an inode.  This can be called
4672  * once we complete IO for a given set of bytes to release their metadata
4673  * reservations.
4674  */
4675 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4676 {
4677         struct btrfs_root *root = BTRFS_I(inode)->root;
4678         u64 to_free = 0;
4679         unsigned dropped;
4680
4681         num_bytes = ALIGN(num_bytes, root->sectorsize);
4682         spin_lock(&BTRFS_I(inode)->lock);
4683         dropped = drop_outstanding_extent(inode);
4684
4685         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4686         spin_unlock(&BTRFS_I(inode)->lock);
4687         if (dropped > 0)
4688                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4689
4690         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4691                                       btrfs_ino(inode), to_free, 0);
4692         if (root->fs_info->quota_enabled) {
4693                 btrfs_qgroup_free(root, num_bytes +
4694                                         dropped * root->leafsize);
4695         }
4696
4697         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4698                                 to_free);
4699 }
4700
4701 /**
4702  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4703  * @inode: inode we're writing to
4704  * @num_bytes: the number of bytes we want to allocate
4705  *
4706  * This will do the following things
4707  *
4708  * o reserve space in the data space info for num_bytes
4709  * o reserve space in the metadata space info based on number of outstanding
4710  *   extents and how much csums will be needed
4711  * o add to the inode's ->delalloc_bytes
4712  * o add it to the fs_info's delalloc inodes list.
4713  *
4714  * This will return 0 for success and -ENOSPC if there is no space left.
4715  */
4716 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4717 {
4718         int ret;
4719
4720         ret = btrfs_check_data_free_space(inode, num_bytes);
4721         if (ret)
4722                 return ret;
4723
4724         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4725         if (ret) {
4726                 btrfs_free_reserved_data_space(inode, num_bytes);
4727                 return ret;
4728         }
4729
4730         return 0;
4731 }
4732
4733 /**
4734  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4735  * @inode: inode we're releasing space for
4736  * @num_bytes: the number of bytes we want to free up
4737  *
4738  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4739  * called in the case that we don't need the metadata AND data reservations
4740  * anymore, such as when there is an error or we insert an inline extent.
4741  *
4742  * This function will release the metadata space that was not used and will
4743  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4744  * list if there are no delalloc bytes left.
4745  */
4746 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4747 {
4748         btrfs_delalloc_release_metadata(inode, num_bytes);
4749         btrfs_free_reserved_data_space(inode, num_bytes);
4750 }
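
/*
 * Editor's note: an illustrative (non-compiled) sketch of how the two
 * helpers above pair up in a write path.  example_write_begin() and
 * example_do_write() are hypothetical; only the reserve/release calls
 * mirror this file.
 */
#if 0 /* example only */
static int example_write_begin(struct inode *inode, u64 len)
{
        int ret;

        /* take both the data and metadata reservations up front */
        ret = btrfs_delalloc_reserve_space(inode, len);
        if (ret)
                return ret;     /* -ENOSPC: nothing to undo */

        ret = example_do_write(inode, len);     /* hypothetical helper */
        if (ret) {
                /* undo both reservations since no delalloc was created */
                btrfs_delalloc_release_space(inode, len);
                return ret;
        }
        return 0;
}
#endif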
4751
4752 static int update_block_group(struct btrfs_trans_handle *trans,
4753                               struct btrfs_root *root,
4754                               u64 bytenr, u64 num_bytes, int alloc)
4755 {
4756         struct btrfs_block_group_cache *cache = NULL;
4757         struct btrfs_fs_info *info = root->fs_info;
4758         u64 total = num_bytes;
4759         u64 old_val;
4760         u64 byte_in_group;
4761         int factor;
4762
4763         /* block accounting for super block */
4764         spin_lock(&info->delalloc_lock);
4765         old_val = btrfs_super_bytes_used(info->super_copy);
4766         if (alloc)
4767                 old_val += num_bytes;
4768         else
4769                 old_val -= num_bytes;
4770         btrfs_set_super_bytes_used(info->super_copy, old_val);
4771         spin_unlock(&info->delalloc_lock);
4772
4773         while (total) {
4774                 cache = btrfs_lookup_block_group(info, bytenr);
4775                 if (!cache)
4776                         return -ENOENT;
4777                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4778                                     BTRFS_BLOCK_GROUP_RAID1 |
4779                                     BTRFS_BLOCK_GROUP_RAID10))
4780                         factor = 2;
4781                 else
4782                         factor = 1;
4783                 /*
4784                  * If this block group has free space cache written out, we
4785                  * need to make sure to load it if we are removing space.  This
4786                  * is because we need the unpinning stage to actually add the
4787                  * space back to the block group, otherwise we will leak space.
4788                  */
4789                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4790                         cache_block_group(cache, trans, NULL, 1);
4791
4792                 byte_in_group = bytenr - cache->key.objectid;
4793                 WARN_ON(byte_in_group > cache->key.offset);
4794
4795                 spin_lock(&cache->space_info->lock);
4796                 spin_lock(&cache->lock);
4797
4798                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4799                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4800                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4801
4802                 cache->dirty = 1;
4803                 old_val = btrfs_block_group_used(&cache->item);
4804                 num_bytes = min(total, cache->key.offset - byte_in_group);
4805                 if (alloc) {
4806                         old_val += num_bytes;
4807                         btrfs_set_block_group_used(&cache->item, old_val);
4808                         cache->reserved -= num_bytes;
4809                         cache->space_info->bytes_reserved -= num_bytes;
4810                         cache->space_info->bytes_used += num_bytes;
4811                         cache->space_info->disk_used += num_bytes * factor;
4812                         spin_unlock(&cache->lock);
4813                         spin_unlock(&cache->space_info->lock);
4814                 } else {
4815                         old_val -= num_bytes;
4816                         btrfs_set_block_group_used(&cache->item, old_val);
4817                         cache->pinned += num_bytes;
4818                         cache->space_info->bytes_pinned += num_bytes;
4819                         cache->space_info->bytes_used -= num_bytes;
4820                         cache->space_info->disk_used -= num_bytes * factor;
4821                         spin_unlock(&cache->lock);
4822                         spin_unlock(&cache->space_info->lock);
4823
4824                         set_extent_dirty(info->pinned_extents,
4825                                          bytenr, bytenr + num_bytes - 1,
4826                                          GFP_NOFS | __GFP_NOFAIL);
4827                 }
4828                 btrfs_put_block_group(cache);
4829                 total -= num_bytes;
4830                 bytenr += num_bytes;
4831         }
4832         return 0;
4833 }
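
/*
 * Editor's note: the factor above accounts for on-disk duplication.  For
 * example, freeing a 1M extent from a RAID1 block group (factor = 2)
 * moves 1M from bytes_used to pinned, but lowers disk_used by 2M, since
 * both mirror copies of the extent become reclaimable.
 */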
4834
4835 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4836 {
4837         struct btrfs_block_group_cache *cache;
4838         u64 bytenr;
4839
4840         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4841         if (!cache)
4842                 return 0;
4843
4844         bytenr = cache->key.objectid;
4845         btrfs_put_block_group(cache);
4846
4847         return bytenr;
4848 }
4849
4850 static int pin_down_extent(struct btrfs_root *root,
4851                            struct btrfs_block_group_cache *cache,
4852                            u64 bytenr, u64 num_bytes, int reserved)
4853 {
4854         spin_lock(&cache->space_info->lock);
4855         spin_lock(&cache->lock);
4856         cache->pinned += num_bytes;
4857         cache->space_info->bytes_pinned += num_bytes;
4858         if (reserved) {
4859                 cache->reserved -= num_bytes;
4860                 cache->space_info->bytes_reserved -= num_bytes;
4861         }
4862         spin_unlock(&cache->lock);
4863         spin_unlock(&cache->space_info->lock);
4864
4865         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4866                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4867         return 0;
4868 }
4869
4870 /*
4871  * this function must be called within a transaction
4872  */
4873 int btrfs_pin_extent(struct btrfs_root *root,
4874                      u64 bytenr, u64 num_bytes, int reserved)
4875 {
4876         struct btrfs_block_group_cache *cache;
4877
4878         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4879         BUG_ON(!cache); /* Logic error */
4880
4881         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4882
4883         btrfs_put_block_group(cache);
4884         return 0;
4885 }
4886
4887 /*
4888  * this function must be called within a transaction
4889  */
4890 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4891                                     struct btrfs_root *root,
4892                                     u64 bytenr, u64 num_bytes)
4893 {
4894         struct btrfs_block_group_cache *cache;
4895
4896         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4897         BUG_ON(!cache); /* Logic error */
4898
4899         /*
4900          * pull in the free space cache (if any) so that our pin
4901          * removes the free space from the cache.  We have load_only set
4902          * to one because the slow code that reads in the free extents
4903          * already checks the pinned extents.
4904          */
4905         cache_block_group(cache, trans, root, 1);
4906
4907         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4908
4909         /* remove us from the free space cache (if we're there at all) */
4910         btrfs_remove_free_space(cache, bytenr, num_bytes);
4911         btrfs_put_block_group(cache);
4912         return 0;
4913 }
4914
4915 /**
4916  * btrfs_update_reserved_bytes - update the block_group and space info counters
4917  * @cache:      The cache we are manipulating
4918  * @num_bytes:  The number of bytes in question
4919  * @reserve:    One of the reservation enums
4920  *
4921  * This is called by the allocator when it reserves space, or by somebody who is
4922  * freeing space that was never actually used on disk.  For example if you
4923  * reserve some space for a new leaf in transaction A and before transaction A
4924  * commits you free that leaf, you call this with reserve set to 0 in order to
4925  * clear the reservation.
4926  *
4927  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4928  * ENOSPC accounting.  For data we handle the reservation through clearing the
4929  * delalloc bits in the io_tree.  We have to do this since we could end up
4930  * allocating less disk space for the amount of data we have reserved in the
4931  * case of compression.
4932  *
4933  * If this is a reservation and the block group has become read only, we
4934  * cannot make the reservation and return -EAGAIN; otherwise this function
4935  * always succeeds.
4936  */
4937 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4938                                        u64 num_bytes, int reserve)
4939 {
4940         struct btrfs_space_info *space_info = cache->space_info;
4941         int ret = 0;
4942
4943         spin_lock(&space_info->lock);
4944         spin_lock(&cache->lock);
4945         if (reserve != RESERVE_FREE) {
4946                 if (cache->ro) {
4947                         ret = -EAGAIN;
4948                 } else {
4949                         cache->reserved += num_bytes;
4950                         space_info->bytes_reserved += num_bytes;
4951                         if (reserve == RESERVE_ALLOC) {
4952                                 trace_btrfs_space_reservation(cache->fs_info,
4953                                                 "space_info", space_info->flags,
4954                                                 num_bytes, 0);
4955                                 space_info->bytes_may_use -= num_bytes;
4956                         }
4957                 }
4958         } else {
4959                 if (cache->ro)
4960                         space_info->bytes_readonly += num_bytes;
4961                 cache->reserved -= num_bytes;
4962                 space_info->bytes_reserved -= num_bytes;
4963                 space_info->reservation_progress++;
4964         }
4965         spin_unlock(&cache->lock);
4966         spin_unlock(&space_info->lock);
4967         return ret;
4968 }
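
/*
 * Editor's note: an illustrative (non-compiled) sketch of the pairing the
 * comment above describes.  example_alloc_and_abort() is hypothetical; the
 * reservation enums and the update calls mirror this file.
 */
#if 0 /* example only */
static void example_alloc_and_abort(struct btrfs_block_group_cache *cache,
                                    u64 num_bytes)
{
        /* allocator reserves space: bytes_may_use -> bytes_reserved */
        if (btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC))
                return; /* -EAGAIN: group went read only */

        /* ... transaction aborts before the space is ever used ... */

        /* give the never-used reservation back */
        btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
}
#endif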
4969
4970 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4971                                 struct btrfs_root *root)
4972 {
4973         struct btrfs_fs_info *fs_info = root->fs_info;
4974         struct btrfs_caching_control *next;
4975         struct btrfs_caching_control *caching_ctl;
4976         struct btrfs_block_group_cache *cache;
4977
4978         down_write(&fs_info->extent_commit_sem);
4979
4980         list_for_each_entry_safe(caching_ctl, next,
4981                                  &fs_info->caching_block_groups, list) {
4982                 cache = caching_ctl->block_group;
4983                 if (block_group_cache_done(cache)) {
4984                         cache->last_byte_to_unpin = (u64)-1;
4985                         list_del_init(&caching_ctl->list);
4986                         put_caching_control(caching_ctl);
4987                 } else {
4988                         cache->last_byte_to_unpin = caching_ctl->progress;
4989                 }
4990         }
4991
4992         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4993                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4994         else
4995                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4996
4997         up_write(&fs_info->extent_commit_sem);
4998
4999         update_global_block_rsv(fs_info);
5000 }
5001
5002 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5003 {
5004         struct btrfs_fs_info *fs_info = root->fs_info;
5005         struct btrfs_block_group_cache *cache = NULL;
5006         struct btrfs_space_info *space_info;
5007         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5008         u64 len;
5009         bool readonly;
5010
5011         while (start <= end) {
5012                 readonly = false;
5013                 if (!cache ||
5014                     start >= cache->key.objectid + cache->key.offset) {
5015                         if (cache)
5016                                 btrfs_put_block_group(cache);
5017                         cache = btrfs_lookup_block_group(fs_info, start);
5018                         BUG_ON(!cache); /* Logic error */
5019                 }
5020
5021                 len = cache->key.objectid + cache->key.offset - start;
5022                 len = min(len, end + 1 - start);
5023
5024                 if (start < cache->last_byte_to_unpin) {
5025                         len = min(len, cache->last_byte_to_unpin - start);
5026                         btrfs_add_free_space(cache, start, len);
5027                 }
5028
5029                 start += len;
5030                 space_info = cache->space_info;
5031
5032                 spin_lock(&space_info->lock);
5033                 spin_lock(&cache->lock);
5034                 cache->pinned -= len;
5035                 space_info->bytes_pinned -= len;
5036                 if (cache->ro) {
5037                         space_info->bytes_readonly += len;
5038                         readonly = true;
5039                 }
5040                 spin_unlock(&cache->lock);
5041                 if (!readonly && global_rsv->space_info == space_info) {
5042                         spin_lock(&global_rsv->lock);
5043                         if (!global_rsv->full) {
5044                                 len = min(len, global_rsv->size -
5045                                           global_rsv->reserved);
5046                                 global_rsv->reserved += len;
5047                                 space_info->bytes_may_use += len;
5048                                 if (global_rsv->reserved >= global_rsv->size)
5049                                         global_rsv->full = 1;
5050                         }
5051                         spin_unlock(&global_rsv->lock);
5052                 }
5053                 spin_unlock(&space_info->lock);
5054         }
5055
5056         if (cache)
5057                 btrfs_put_block_group(cache);
5058         return 0;
5059 }
5060
5061 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5062                                struct btrfs_root *root)
5063 {
5064         struct btrfs_fs_info *fs_info = root->fs_info;
5065         struct extent_io_tree *unpin;
5066         u64 start;
5067         u64 end;
5068         int ret;
5069
5070         if (trans->aborted)
5071                 return 0;
5072
5073         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5074                 unpin = &fs_info->freed_extents[1];
5075         else
5076                 unpin = &fs_info->freed_extents[0];
5077
5078         while (1) {
5079                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5080                                             EXTENT_DIRTY, NULL);
5081                 if (ret)
5082                         break;
5083
5084                 if (btrfs_test_opt(root, DISCARD))
5085                         ret = btrfs_discard_extent(root, start,
5086                                                    end + 1 - start, NULL);
5087
5088                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5089                 unpin_extent_range(root, start, end);
5090                 cond_resched();
5091         }
5092
5093         return 0;
5094 }
5095
5096 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5097                                 struct btrfs_root *root,
5098                                 u64 bytenr, u64 num_bytes, u64 parent,
5099                                 u64 root_objectid, u64 owner_objectid,
5100                                 u64 owner_offset, int refs_to_drop,
5101                                 struct btrfs_delayed_extent_op *extent_op)
5102 {
5103         struct btrfs_key key;
5104         struct btrfs_path *path;
5105         struct btrfs_fs_info *info = root->fs_info;
5106         struct btrfs_root *extent_root = info->extent_root;
5107         struct extent_buffer *leaf;
5108         struct btrfs_extent_item *ei;
5109         struct btrfs_extent_inline_ref *iref;
5110         int ret;
5111         int is_data;
5112         int extent_slot = 0;
5113         int found_extent = 0;
5114         int num_to_del = 1;
5115         u32 item_size;
5116         u64 refs;
5117
5118         path = btrfs_alloc_path();
5119         if (!path)
5120                 return -ENOMEM;
5121
5122         path->reada = 1;
5123         path->leave_spinning = 1;
5124
5125         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5126         BUG_ON(!is_data && refs_to_drop != 1);
5127
5128         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5129                                     bytenr, num_bytes, parent,
5130                                     root_objectid, owner_objectid,
5131                                     owner_offset);
5132         if (ret == 0) {
5133                 extent_slot = path->slots[0];
5134                 while (extent_slot >= 0) {
5135                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5136                                               extent_slot);
5137                         if (key.objectid != bytenr)
5138                                 break;
5139                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5140                             key.offset == num_bytes) {
5141                                 found_extent = 1;
5142                                 break;
5143                         }
5144                         if (path->slots[0] - extent_slot > 5)
5145                                 break;
5146                         extent_slot--;
5147                 }
5148 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5149                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5150                 if (found_extent && item_size < sizeof(*ei))
5151                         found_extent = 0;
5152 #endif
5153                 if (!found_extent) {
5154                         BUG_ON(iref);
5155                         ret = remove_extent_backref(trans, extent_root, path,
5156                                                     NULL, refs_to_drop,
5157                                                     is_data);
5158                         if (ret) {
5159                                 btrfs_abort_transaction(trans, extent_root, ret);
5160                                 goto out;
5161                         }
5162                         btrfs_release_path(path);
5163                         path->leave_spinning = 1;
5164
5165                         key.objectid = bytenr;
5166                         key.type = BTRFS_EXTENT_ITEM_KEY;
5167                         key.offset = num_bytes;
5168
5169                         ret = btrfs_search_slot(trans, extent_root,
5170                                                 &key, path, -1, 1);
5171                         if (ret) {
5172                                 printk(KERN_ERR "umm, got %d back from search"
5173                                        ", was looking for %llu\n", ret,
5174                                        (unsigned long long)bytenr);
5175                                 if (ret > 0)
5176                                         btrfs_print_leaf(extent_root,
5177                                                          path->nodes[0]);
5178                         }
5179                         if (ret < 0) {
5180                                 btrfs_abort_transaction(trans, extent_root, ret);
5181                                 goto out;
5182                         }
5183                         extent_slot = path->slots[0];
5184                 }
5185         } else if (ret == -ENOENT) {
5186                 btrfs_print_leaf(extent_root, path->nodes[0]);
5187                 WARN_ON(1);
5188                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5189                        "parent %llu root %llu  owner %llu offset %llu\n",
5190                        (unsigned long long)bytenr,
5191                        (unsigned long long)parent,
5192                        (unsigned long long)root_objectid,
5193                        (unsigned long long)owner_objectid,
5194                        (unsigned long long)owner_offset);
5195         } else {
5196                 btrfs_abort_transaction(trans, extent_root, ret);
5197                 goto out;
5198         }
5199
5200         leaf = path->nodes[0];
5201         item_size = btrfs_item_size_nr(leaf, extent_slot);
5202 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5203         if (item_size < sizeof(*ei)) {
5204                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5205                 ret = convert_extent_item_v0(trans, extent_root, path,
5206                                              owner_objectid, 0);
5207                 if (ret < 0) {
5208                         btrfs_abort_transaction(trans, extent_root, ret);
5209                         goto out;
5210                 }
5211
5212                 btrfs_release_path(path);
5213                 path->leave_spinning = 1;
5214
5215                 key.objectid = bytenr;
5216                 key.type = BTRFS_EXTENT_ITEM_KEY;
5217                 key.offset = num_bytes;
5218
5219                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5220                                         -1, 1);
5221                 if (ret) {
5222                         printk(KERN_ERR "umm, got %d back from search"
5223                                ", was looking for %llu\n", ret,
5224                                (unsigned long long)bytenr);
5225                         btrfs_print_leaf(extent_root, path->nodes[0]);
5226                 }
5227                 if (ret < 0) {
5228                         btrfs_abort_transaction(trans, extent_root, ret);
5229                         goto out;
5230                 }
5231
5232                 extent_slot = path->slots[0];
5233                 leaf = path->nodes[0];
5234                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5235         }
5236 #endif
5237         BUG_ON(item_size < sizeof(*ei));
5238         ei = btrfs_item_ptr(leaf, extent_slot,
5239                             struct btrfs_extent_item);
5240         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5241                 struct btrfs_tree_block_info *bi;
5242                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5243                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5244                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5245         }
5246
5247         refs = btrfs_extent_refs(leaf, ei);
5248         BUG_ON(refs < refs_to_drop);
5249         refs -= refs_to_drop;
5250
5251         if (refs > 0) {
5252                 if (extent_op)
5253                         __run_delayed_extent_op(extent_op, leaf, ei);
5254                 /*
5255                  * In the case of inline back ref, reference count will
5256                  * be updated by remove_extent_backref
5257                  */
5258                 if (iref) {
5259                         BUG_ON(!found_extent);
5260                 } else {
5261                         btrfs_set_extent_refs(leaf, ei, refs);
5262                         btrfs_mark_buffer_dirty(leaf);
5263                 }
5264                 if (found_extent) {
5265                         ret = remove_extent_backref(trans, extent_root, path,
5266                                                     iref, refs_to_drop,
5267                                                     is_data);
5268                         if (ret) {
5269                                 btrfs_abort_transaction(trans, extent_root, ret);
5270                                 goto out;
5271                         }
5272                 }
5273         } else {
5274                 if (found_extent) {
5275                         BUG_ON(is_data && refs_to_drop !=
5276                                extent_data_ref_count(root, path, iref));
5277                         if (iref) {
5278                                 BUG_ON(path->slots[0] != extent_slot);
5279                         } else {
5280                                 BUG_ON(path->slots[0] != extent_slot + 1);
5281                                 path->slots[0] = extent_slot;
5282                                 num_to_del = 2;
5283                         }
5284                 }
5285
5286                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5287                                       num_to_del);
5288                 if (ret) {
5289                         btrfs_abort_transaction(trans, extent_root, ret);
5290                         goto out;
5291                 }
5292                 btrfs_release_path(path);
5293
5294                 if (is_data) {
5295                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5296                         if (ret) {
5297                                 btrfs_abort_transaction(trans, extent_root, ret);
5298                                 goto out;
5299                         }
5300                 }
5301
5302                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5303                 if (ret) {
5304                         btrfs_abort_transaction(trans, extent_root, ret);
5305                         goto out;
5306                 }
5307         }
5308 out:
5309         btrfs_free_path(path);
5310         return ret;
5311 }
5312
5313 /*
5314  * when we free a block, it is possible (and likely) that we free the last
5315  * delayed ref for that extent as well.  This searches the delayed ref tree for
5316  * a given extent, and if there are no other delayed refs to be processed, it
5317  * removes it from the tree.
5318  */
5319 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5320                                       struct btrfs_root *root, u64 bytenr)
5321 {
5322         struct btrfs_delayed_ref_head *head;
5323         struct btrfs_delayed_ref_root *delayed_refs;
5324         struct btrfs_delayed_ref_node *ref;
5325         struct rb_node *node;
5326         int ret = 0;
5327
5328         delayed_refs = &trans->transaction->delayed_refs;
5329         spin_lock(&delayed_refs->lock);
5330         head = btrfs_find_delayed_ref_head(trans, bytenr);
5331         if (!head)
5332                 goto out;
5333
5334         node = rb_prev(&head->node.rb_node);
5335         if (!node)
5336                 goto out;
5337
5338         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5339
5340         /* there are still entries for this ref, we can't drop it */
5341         if (ref->bytenr == bytenr)
5342                 goto out;
5343
5344         if (head->extent_op) {
5345                 if (!head->must_insert_reserved)
5346                         goto out;
5347                 kfree(head->extent_op);
5348                 head->extent_op = NULL;
5349         }
5350
5351         /*
5352          * waiting for the lock here would deadlock.  If someone else has it
5353          * locked, they are already in the process of dropping it anyway.
5354          */
5355         if (!mutex_trylock(&head->mutex))
5356                 goto out;
5357
5358         /*
5359          * at this point we have a head with no other entries.  Go
5360          * ahead and process it.
5361          */
5362         head->node.in_tree = 0;
5363         rb_erase(&head->node.rb_node, &delayed_refs->root);
5364
5365         delayed_refs->num_entries--;
5366
5367         /*
5368          * we don't take a ref on the node because we're removing it from the
5369          * tree, so we just steal the ref the tree was holding.
5370          */
5371         delayed_refs->num_heads--;
5372         if (list_empty(&head->cluster))
5373                 delayed_refs->num_heads_ready--;
5374
5375         list_del_init(&head->cluster);
5376         spin_unlock(&delayed_refs->lock);
5377
5378         BUG_ON(head->extent_op);
5379         if (head->must_insert_reserved)
5380                 ret = 1;
5381
5382         mutex_unlock(&head->mutex);
5383         btrfs_put_delayed_ref(&head->node);
5384         return ret;
5385 out:
5386         spin_unlock(&delayed_refs->lock);
5387         return 0;
5388 }
5389
5390 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5391                            struct btrfs_root *root,
5392                            struct extent_buffer *buf,
5393                            u64 parent, int last_ref)
5394 {
5395         struct btrfs_block_group_cache *cache = NULL;
5396         int ret;
5397
5398         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5399                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5400                                         buf->start, buf->len,
5401                                         parent, root->root_key.objectid,
5402                                         btrfs_header_level(buf),
5403                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5404                 BUG_ON(ret); /* -ENOMEM */
5405         }
5406
5407         if (!last_ref)
5408                 return;
5409
5410         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5411
5412         if (btrfs_header_generation(buf) == trans->transid) {
5413                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5414                         ret = check_ref_cleanup(trans, root, buf->start);
5415                         if (!ret)
5416                                 goto out;
5417                 }
5418
5419                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5420                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5421                         goto out;
5422                 }
5423
5424                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5425
5426                 btrfs_add_free_space(cache, buf->start, buf->len);
5427                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5428         }
5429 out:
5430         /*
5431          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5432          * anymore.
5433          */
5434         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5435         btrfs_put_block_group(cache);
5436 }
5437
5438 /* Can return -ENOMEM */
5439 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5440                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5441                       u64 owner, u64 offset, int for_cow)
5442 {
5443         int ret;
5444         struct btrfs_fs_info *fs_info = root->fs_info;
5445
5446         /*
5447          * tree log blocks never actually go into the extent allocation
5448          * tree, just update pinning info and exit early.
5449          */
5450         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5451                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5452                 /* unlocks the pinned mutex */
5453                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5454                 ret = 0;
5455         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5456                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5457                                         num_bytes,
5458                                         parent, root_objectid, (int)owner,
5459                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5460         } else {
5461                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5462                                                 num_bytes,
5463                                                 parent, root_objectid, owner,
5464                                                 offset, BTRFS_DROP_DELAYED_REF,
5465                                                 NULL, for_cow);
5466         }
5467         return ret;
5468 }
5469
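/*
 * round val up to the next stripesize boundary.  For example, with a 64K
 * stripesize, mask is 0xffff, so val = 0x12345 becomes
 * (0x12345 + 0xffff) & ~0xffff = 0x20000.
 */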
5470 static u64 stripe_align(struct btrfs_root *root,
5471                         struct btrfs_block_group_cache *cache,
5472                         u64 val, u64 num_bytes)
5473 {
5474         u64 mask;
5475         u64 ret;
5476         mask = ((u64)root->stripesize - 1);
5477         ret = (val + mask) & ~mask;
5478         return ret;
5479 }
5480
5481 /*
5482  * when we wait for progress in the block group caching, its because
5483  * our allocation attempt failed at least once.  So, we must sleep
5484  * and let some progress happen before we try again.
5485  *
5486  * This function will sleep at least once waiting for new free space to
5487  * show up, and then it will check the block group free space numbers
5488  * for our min num_bytes.  Another option is to have it go ahead
5489  * and look in the rbtree for a free extent of a given size, but this
5490  * is a good start.
5491  */
5492 static noinline int
5493 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5494                                 u64 num_bytes)
5495 {
5496         struct btrfs_caching_control *caching_ctl;
5497         DEFINE_WAIT(wait);
5498
5499         caching_ctl = get_caching_control(cache);
5500         if (!caching_ctl)
5501                 return 0;
5502
5503         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5504                    (cache->free_space_ctl->free_space >= num_bytes));
5505
5506         put_caching_control(caching_ctl);
5507         return 0;
5508 }
5509
5510 static noinline int
5511 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5512 {
5513         struct btrfs_caching_control *caching_ctl;
5514         DEFINE_WAIT(wait);
5515
5516         caching_ctl = get_caching_control(cache);
5517         if (!caching_ctl)
5518                 return 0;
5519
5520         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5521
5522         put_caching_control(caching_ctl);
5523         return 0;
5524 }
5525
5526 int __get_raid_index(u64 flags)
5527 {
5528         int index;
5529
5530         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5531                 index = 0;
5532         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5533                 index = 1;
5534         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5535                 index = 2;
5536         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5537                 index = 3;
5538         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5539                 index = 5;
5540         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5541                 index = 6;
5542         else
5543                 index = 4; /* BTRFS_BLOCK_GROUP_SINGLE */
5544         return index;
5545 }
5546
5547 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5548 {
5549         return __get_raid_index(cache->flags);
5550 }
5551
5552 enum btrfs_loop_type {
5553         LOOP_CACHING_NOWAIT = 0,
5554         LOOP_CACHING_WAIT = 1,
5555         LOOP_ALLOC_CHUNK = 2,
5556         LOOP_NO_EMPTY_SIZE = 3,
5557 };
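
/*
 * find_free_extent() starts at LOOP_CACHING_NOWAIT and, each time a full
 * pass over the block groups fails, steps through these states in order:
 * wait on caching block groups, force a chunk allocation, and finally
 * retry with empty_size and empty_cluster forced to zero.
 */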
5558
5559 /*
5560  * walks the btree of allocated extents and finds a hole of a given size.
5561  * The key ins is changed to record the hole:
5562  * ins->objectid == block start
5563  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5564  * ins->offset == number of blocks
5565  * Any available blocks before search_start are skipped.
5566  */
5567 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5568                                      struct btrfs_root *orig_root,
5569                                      u64 num_bytes, u64 empty_size,
5570                                      u64 hint_byte, struct btrfs_key *ins,
5571                                      u64 data)
5572 {
5573         int ret = 0;
5574         struct btrfs_root *root = orig_root->fs_info->extent_root;
5575         struct btrfs_free_cluster *last_ptr = NULL;
5576         struct btrfs_block_group_cache *block_group = NULL;
5577         struct btrfs_block_group_cache *used_block_group;
5578         u64 search_start = 0;
5579         int empty_cluster = 2 * 1024 * 1024;
5580         struct btrfs_space_info *space_info;
5581         int loop = 0;
5582         int index = 0;
5583         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5584                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5585         bool found_uncached_bg = false;
5586         bool failed_cluster_refill = false;
5587         bool failed_alloc = false;
5588         bool use_cluster = true;
5589         bool have_caching_bg = false;
5590
5591         WARN_ON(num_bytes < root->sectorsize);
5592         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5593         ins->objectid = 0;
5594         ins->offset = 0;
5595
5596         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5597
5598         space_info = __find_space_info(root->fs_info, data);
5599         if (!space_info) {
5600                 printk(KERN_ERR "No space info for %llu\n", data);
5601                 return -ENOSPC;
5602         }
5603
5604         /*
5605          * If the space info is for both data and metadata it means we have a
5606          * small filesystem and we can't use the clustering stuff.
5607          */
5608         if (btrfs_mixed_space_info(space_info))
5609                 use_cluster = false;
5610
5611         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5612                 last_ptr = &root->fs_info->meta_alloc_cluster;
5613                 if (!btrfs_test_opt(root, SSD))
5614                         empty_cluster = 64 * 1024;
5615         }
5616
5617         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5618             btrfs_test_opt(root, SSD)) {
5619                 last_ptr = &root->fs_info->data_alloc_cluster;
5620         }
5621
5622         if (last_ptr) {
5623                 spin_lock(&last_ptr->lock);
5624                 if (last_ptr->block_group)
5625                         hint_byte = last_ptr->window_start;
5626                 spin_unlock(&last_ptr->lock);
5627         }
5628
5629         search_start = max(search_start, first_logical_byte(root, 0));
5630         search_start = max(search_start, hint_byte);
5631
5632         if (!last_ptr)
5633                 empty_cluster = 0;
5634
5635         if (search_start == hint_byte) {
5636                 block_group = btrfs_lookup_block_group(root->fs_info,
5637                                                        search_start);
5638                 used_block_group = block_group;
5639                 /*
5640                  * we don't want to use the block group if it doesn't match our
5641                  * allocation bits, or if it's not cached.
5642                  *
5643                  * However if we are re-searching with an ideal block group
5644                  * picked out then we don't care that the block group is cached.
5645                  */
5646                 if (block_group && block_group_bits(block_group, data) &&
5647                     block_group->cached != BTRFS_CACHE_NO) {
5648                         down_read(&space_info->groups_sem);
5649                         if (list_empty(&block_group->list) ||
5650                             block_group->ro) {
5651                                 /*
5652                                  * someone is removing this block group,
5653                                  * we can't jump into the have_block_group
5654                                  * target because our list pointers are not
5655                                  * valid
5656                                  */
5657                                 btrfs_put_block_group(block_group);
5658                                 up_read(&space_info->groups_sem);
5659                         } else {
5660                                 index = get_block_group_index(block_group);
5661                                 goto have_block_group;
5662                         }
5663                 } else if (block_group) {
5664                         btrfs_put_block_group(block_group);
5665                 }
5666         }
5667 search:
5668         have_caching_bg = false;
5669         down_read(&space_info->groups_sem);
5670         list_for_each_entry(block_group, &space_info->block_groups[index],
5671                             list) {
5672                 u64 offset;
5673                 int cached;
5674
5675                 used_block_group = block_group;
5676                 btrfs_get_block_group(block_group);
5677                 search_start = block_group->key.objectid;
5678
5679                 /*
5680                  * this can happen if we end up cycling through all the
5681                  * raid types, but we want to make sure we only allocate
5682                  * for the proper type.
5683                  */
5684                 if (!block_group_bits(block_group, data)) {
5685                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5686                                     BTRFS_BLOCK_GROUP_RAID1 |
5687                                     BTRFS_BLOCK_GROUP_RAID5 |
5688                                     BTRFS_BLOCK_GROUP_RAID6 |
5689                                     BTRFS_BLOCK_GROUP_RAID10;
5690
5691                         /*
5692                          * if they asked for extra copies and this block group
5693                          * doesn't provide them, bail.  This does allow us to
5694                          * fill raid0 from raid1.
5695                          */
5696                         if ((data & extra) && !(block_group->flags & extra))
5697                                 goto loop;
5698                 }
5699
5700 have_block_group:
5701                 cached = block_group_cache_done(block_group);
5702                 if (unlikely(!cached)) {
5703                         found_uncached_bg = true;
5704                         ret = cache_block_group(block_group, trans,
5705                                                 orig_root, 0);
5706                         BUG_ON(ret < 0);
5707                         ret = 0;
5708                 }
5709
5710                 if (unlikely(block_group->ro))
5711                         goto loop;
5712
5713                 /*
5714                  * Ok, we want to try to use the cluster allocator, so
5715                  * let's look there
5716                  */
5717                 if (last_ptr) {
5718                         unsigned long aligned_cluster;
5719                         /*
5720                          * the refill lock keeps out other
5721                          * people trying to start a new cluster
5722                          */
5723                         spin_lock(&last_ptr->refill_lock);
5724                         used_block_group = last_ptr->block_group;
5725                         if (used_block_group != block_group &&
5726                             (!used_block_group ||
5727                              used_block_group->ro ||
5728                              !block_group_bits(used_block_group, data))) {
5729                                 used_block_group = block_group;
5730                                 goto refill_cluster;
5731                         }
5732
5733                         if (used_block_group != block_group)
5734                                 btrfs_get_block_group(used_block_group);
5735
5736                         offset = btrfs_alloc_from_cluster(used_block_group,
5737                           last_ptr, num_bytes, used_block_group->key.objectid);
5738                         if (offset) {
5739                                 /* we have a block, we're done */
5740                                 spin_unlock(&last_ptr->refill_lock);
5741                                 trace_btrfs_reserve_extent_cluster(root,
5742                                         block_group, search_start, num_bytes);
5743                                 goto checks;
5744                         }
5745
5746                         WARN_ON(last_ptr->block_group != used_block_group);
5747                         if (used_block_group != block_group) {
5748                                 btrfs_put_block_group(used_block_group);
5749                                 used_block_group = block_group;
5750                         }
5751 refill_cluster:
5752                         BUG_ON(used_block_group != block_group);
5753                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5754                          * set up a new cluster, so let's just skip it
5755                          * and let the allocator find whatever block
5756                          * it can find.  If we reach this point, we
5757                          * will have tried the cluster allocator
5758                          * plenty of times and not have found
5759                          * anything, so we are likely way too
5760                          * fragmented for the clustering stuff to find
5761                          * anything.
5762                          *
5763                          * However, if the cluster is taken from the
5764                          * current block group, release the cluster
5765                          * first, so that we stand a better chance of
5766                          * succeeding in the unclustered
5767                          * allocation.  */
5768                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5769                             last_ptr->block_group != block_group) {
5770                                 spin_unlock(&last_ptr->refill_lock);
5771                                 goto unclustered_alloc;
5772                         }
5773
5774                         /*
5775                          * this cluster didn't work out, free it and
5776                          * start over
5777                          */
5778                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5779
5780                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5781                                 spin_unlock(&last_ptr->refill_lock);
5782                                 goto unclustered_alloc;
5783                         }
5784
5785                         aligned_cluster = max_t(unsigned long,
5786                                                 empty_cluster + empty_size,
5787                                               block_group->full_stripe_len);
5788
5789                         /* allocate a cluster in this block group */
5790                         ret = btrfs_find_space_cluster(trans, root,
5791                                                block_group, last_ptr,
5792                                                search_start, num_bytes,
5793                                                aligned_cluster);
5794                         if (ret == 0) {
5795                                 /*
5796                                  * now pull our allocation out of this
5797                                  * cluster
5798                                  */
5799                                 offset = btrfs_alloc_from_cluster(block_group,
5800                                                   last_ptr, num_bytes,
5801                                                   search_start);
5802                                 if (offset) {
5803                                         /* we found one, proceed */
5804                                         spin_unlock(&last_ptr->refill_lock);
5805                                         trace_btrfs_reserve_extent_cluster(root,
5806                                                 block_group, search_start,
5807                                                 num_bytes);
5808                                         goto checks;
5809                                 }
5810                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5811                                    && !failed_cluster_refill) {
5812                                 spin_unlock(&last_ptr->refill_lock);
5813
5814                                 failed_cluster_refill = true;
5815                                 wait_block_group_cache_progress(block_group,
5816                                        num_bytes + empty_cluster + empty_size);
5817                                 goto have_block_group;
5818                         }
5819
5820                         /*
5821                          * at this point we either didn't find a cluster
5822                          * or we weren't able to allocate a block from our
5823                          * cluster.  Free the cluster we've been trying
5824                          * to use, and go to the next block group
5825                          */
5826                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5827                         spin_unlock(&last_ptr->refill_lock);
5828                         goto loop;
5829                 }
5830
5831 unclustered_alloc:
5832                 spin_lock(&block_group->free_space_ctl->tree_lock);
5833                 if (cached &&
5834                     block_group->free_space_ctl->free_space <
5835                     num_bytes + empty_cluster + empty_size) {
5836                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5837                         goto loop;
5838                 }
5839                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5840
5841                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5842                                                     num_bytes, empty_size);
5843                 /*
5844                  * If we didn't find a chunk, and we haven't failed on this
5845                  * block group before, and this block group is in the middle of
5846                  * caching and we are ok with waiting, then go ahead and wait
5847                  * for progress to be made, and set failed_alloc to true.
5848                  *
5849                  * If failed_alloc is true then we've already waited on this
5850                  * block group once and should move on to the next block group.
5851                  */
5852                 if (!offset && !failed_alloc && !cached &&
5853                     loop > LOOP_CACHING_NOWAIT) {
5854                         wait_block_group_cache_progress(block_group,
5855                                                 num_bytes + empty_size);
5856                         failed_alloc = true;
5857                         goto have_block_group;
5858                 } else if (!offset) {
5859                         if (!cached)
5860                                 have_caching_bg = true;
5861                         goto loop;
5862                 }
5863 checks:
5864                 search_start = stripe_align(root, used_block_group,
5865                                             offset, num_bytes);
5866
5867                 /* move on to the next group */
5868                 if (search_start + num_bytes >
5869                     used_block_group->key.objectid + used_block_group->key.offset) {
5870                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5871                         goto loop;
5872                 }
5873
5874                 if (offset < search_start)
5875                         btrfs_add_free_space(used_block_group, offset,
5876                                              search_start - offset);
5877                 BUG_ON(offset > search_start);
5878
5879                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5880                                                   alloc_type);
5881                 if (ret == -EAGAIN) {
5882                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5883                         goto loop;
5884                 }
5885
5886                 /* we are all good, let's return */
5887                 ins->objectid = search_start;
5888                 ins->offset = num_bytes;
5889
5890                 trace_btrfs_reserve_extent(orig_root, block_group,
5891                                            search_start, num_bytes);
5892                 if (used_block_group != block_group)
5893                         btrfs_put_block_group(used_block_group);
5894                 btrfs_put_block_group(block_group);
5895                 break;
5896 loop:
5897                 failed_cluster_refill = false;
5898                 failed_alloc = false;
5899                 BUG_ON(index != get_block_group_index(block_group));
5900                 if (used_block_group != block_group)
5901                         btrfs_put_block_group(used_block_group);
5902                 btrfs_put_block_group(block_group);
5903         }
5904         up_read(&space_info->groups_sem);
5905
5906         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5907                 goto search;
5908
5909         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5910                 goto search;
5911
5912         /*
5913          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5914          *                      caching kthreads as we move along
5915          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5916          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5917          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5918          *                      again
5919          */
5920         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5921                 index = 0;
5922                 loop++;
5923                 if (loop == LOOP_ALLOC_CHUNK) {
5924                         ret = do_chunk_alloc(trans, root, data,
5925                                              CHUNK_ALLOC_FORCE);
5926                         /*
5927                          * Do not bail out on ENOSPC since we
5928                          * can do more things.
5929                          */
5930                         if (ret < 0 && ret != -ENOSPC) {
5931                                 btrfs_abort_transaction(trans,
5932                                                         root, ret);
5933                                 goto out;
5934                         }
5935                 }
5936
5937                 if (loop == LOOP_NO_EMPTY_SIZE) {
5938                         empty_size = 0;
5939                         empty_cluster = 0;
5940                 }
5941
5942                 goto search;
5943         } else if (!ins->objectid) {
5944                 ret = -ENOSPC;
5945         } else if (ins->objectid) {
5946                 ret = 0;
5947         }
5948 out:
5949
5950         return ret;
5951 }
5952
5953 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5954                             int dump_block_groups)
5955 {
5956         struct btrfs_block_group_cache *cache;
5957         int index = 0;
5958
5959         spin_lock(&info->lock);
5960         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5961                (unsigned long long)info->flags,
5962                (unsigned long long)(info->total_bytes - info->bytes_used -
5963                                     info->bytes_pinned - info->bytes_reserved -
5964                                     info->bytes_readonly),
5965                (info->full) ? "" : "not ");
5966         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5967                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5968                (unsigned long long)info->total_bytes,
5969                (unsigned long long)info->bytes_used,
5970                (unsigned long long)info->bytes_pinned,
5971                (unsigned long long)info->bytes_reserved,
5972                (unsigned long long)info->bytes_may_use,
5973                (unsigned long long)info->bytes_readonly);
5974         spin_unlock(&info->lock);
5975
5976         if (!dump_block_groups)
5977                 return;
5978
5979         down_read(&info->groups_sem);
5980 again:
5981         list_for_each_entry(cache, &info->block_groups[index], list) {
5982                 spin_lock(&cache->lock);
5983                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
5984                        (unsigned long long)cache->key.objectid,
5985                        (unsigned long long)cache->key.offset,
5986                        (unsigned long long)btrfs_block_group_used(&cache->item),
5987                        (unsigned long long)cache->pinned,
5988                        (unsigned long long)cache->reserved,
5989                        cache->ro ? "[readonly]" : "");
5990                 btrfs_dump_free_space(cache, bytes);
5991                 spin_unlock(&cache->lock);
5992         }
5993         if (++index < BTRFS_NR_RAID_TYPES)
5994                 goto again;
5995         up_read(&info->groups_sem);
5996 }
5997
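/*
 * On -ENOSPC the request is retried with a smaller size: num_bytes is
 * halved, rounded down to a sectorsize boundary and clamped to
 * min_alloc_size.  For example, num_bytes = 1M with min_alloc_size = 256K
 * and a 4K sectorsize retries at 512K and then at 256K before giving up.
 */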
5998 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5999                          struct btrfs_root *root,
6000                          u64 num_bytes, u64 min_alloc_size,
6001                          u64 empty_size, u64 hint_byte,
6002                          struct btrfs_key *ins, u64 data)
6003 {
6004         bool final_tried = false;
6005         int ret;
6006
6007         data = btrfs_get_alloc_profile(root, data);
6008 again:
6009         WARN_ON(num_bytes < root->sectorsize);
6010         ret = find_free_extent(trans, root, num_bytes, empty_size,
6011                                hint_byte, ins, data);
6012
6013         if (ret == -ENOSPC) {
6014                 if (!final_tried) {
6015                         num_bytes = num_bytes >> 1;
6016                         num_bytes = num_bytes & ~(root->sectorsize - 1);
6017                         num_bytes = max(num_bytes, min_alloc_size);
6018                         if (num_bytes == min_alloc_size)
6019                                 final_tried = true;
6020                         goto again;
6021                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6022                         struct btrfs_space_info *sinfo;
6023
6024                         sinfo = __find_space_info(root->fs_info, data);
6025                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
6026                                "wanted %llu\n", (unsigned long long)data,
6027                                (unsigned long long)num_bytes);
6028                         if (sinfo)
6029                                 dump_space_info(sinfo, num_bytes, 1);
6030                 }
6031         }
6032
6033         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6034
6035         return ret;
6036 }
6037
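/*
 * return a reserved extent: if pin is set the range stays pinned until
 * the transaction commits, otherwise it goes straight back into the
 * block group's free space cache.
 */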
6038 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6039                                         u64 start, u64 len, int pin)
6040 {
6041         struct btrfs_block_group_cache *cache;
6042         int ret = 0;
6043
6044         cache = btrfs_lookup_block_group(root->fs_info, start);
6045         if (!cache) {
6046                 printk(KERN_ERR "Unable to find block group for %llu\n",
6047                        (unsigned long long)start);
6048                 return -ENOSPC;
6049         }
6050
6051         if (btrfs_test_opt(root, DISCARD))
6052                 ret = btrfs_discard_extent(root, start, len, NULL);
6053
6054         if (pin)
6055                 pin_down_extent(root, cache, start, len, 1);
6056         else {
6057                 btrfs_add_free_space(cache, start, len);
6058                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6059         }
6060         btrfs_put_block_group(cache);
6061
6062         trace_btrfs_reserved_extent_free(root, start, len);
6063
6064         return ret;
6065 }
6066
6067 int btrfs_free_reserved_extent(struct btrfs_root *root,
6068                                         u64 start, u64 len)
6069 {
6070         return __btrfs_free_reserved_extent(root, start, len, 0);
6071 }
6072
6073 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6074                                        u64 start, u64 len)
6075 {
6076         return __btrfs_free_reserved_extent(root, start, len, 1);
6077 }
6078
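/*
 * the extent item is inserted together with a single inline backref:
 * a shared data ref keyed on the parent block when parent > 0,
 * otherwise an extent data ref keyed on root/owner/offset.
 */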
6079 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6080                                       struct btrfs_root *root,
6081                                       u64 parent, u64 root_objectid,
6082                                       u64 flags, u64 owner, u64 offset,
6083                                       struct btrfs_key *ins, int ref_mod)
6084 {
6085         int ret;
6086         struct btrfs_fs_info *fs_info = root->fs_info;
6087         struct btrfs_extent_item *extent_item;
6088         struct btrfs_extent_inline_ref *iref;
6089         struct btrfs_path *path;
6090         struct extent_buffer *leaf;
6091         int type;
6092         u32 size;
6093
6094         if (parent > 0)
6095                 type = BTRFS_SHARED_DATA_REF_KEY;
6096         else
6097                 type = BTRFS_EXTENT_DATA_REF_KEY;
6098
6099         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6100
6101         path = btrfs_alloc_path();
6102         if (!path)
6103                 return -ENOMEM;
6104
6105         path->leave_spinning = 1;
6106         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6107                                       ins, size);
6108         if (ret) {
6109                 btrfs_free_path(path);
6110                 return ret;
6111         }
6112
6113         leaf = path->nodes[0];
6114         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6115                                      struct btrfs_extent_item);
6116         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6117         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6118         btrfs_set_extent_flags(leaf, extent_item,
6119                                flags | BTRFS_EXTENT_FLAG_DATA);
6120
6121         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6122         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6123         if (parent > 0) {
6124                 struct btrfs_shared_data_ref *ref;
6125                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6126                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6127                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6128         } else {
6129                 struct btrfs_extent_data_ref *ref;
6130                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6131                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6132                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6133                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6134                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6135         }
6136
6137         btrfs_mark_buffer_dirty(path->nodes[0]);
6138         btrfs_free_path(path);
6139
6140         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6141         if (ret) { /* -ENOENT, logic error */
6142                 printk(KERN_ERR "btrfs update block group failed for %llu "
6143                        "%llu\n", (unsigned long long)ins->objectid,
6144                        (unsigned long long)ins->offset);
6145                 BUG();
6146         }
6147         return ret;
6148 }
6149
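/*
 * like alloc_reserved_file_extent() but for tree blocks: the inserted
 * item is laid out as [extent item][tree block info][inline ref], with
 * a shared block ref when parent > 0 and a tree block ref otherwise.
 */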
6150 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6151                                      struct btrfs_root *root,
6152                                      u64 parent, u64 root_objectid,
6153                                      u64 flags, struct btrfs_disk_key *key,
6154                                      int level, struct btrfs_key *ins)
6155 {
6156         int ret;
6157         struct btrfs_fs_info *fs_info = root->fs_info;
6158         struct btrfs_extent_item *extent_item;
6159         struct btrfs_tree_block_info *block_info;
6160         struct btrfs_extent_inline_ref *iref;
6161         struct btrfs_path *path;
6162         struct extent_buffer *leaf;
6163         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6164
6165         path = btrfs_alloc_path();
6166         if (!path)
6167                 return -ENOMEM;
6168
6169         path->leave_spinning = 1;
6170         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6171                                       ins, size);
6172         if (ret) {
6173                 btrfs_free_path(path);
6174                 return ret;
6175         }
6176
6177         leaf = path->nodes[0];
6178         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6179                                      struct btrfs_extent_item);
6180         btrfs_set_extent_refs(leaf, extent_item, 1);
6181         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6182         btrfs_set_extent_flags(leaf, extent_item,
6183                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6184         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6185
6186         btrfs_set_tree_block_key(leaf, block_info, key);
6187         btrfs_set_tree_block_level(leaf, block_info, level);
6188
6189         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6190         if (parent > 0) {
6191                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6192                 btrfs_set_extent_inline_ref_type(leaf, iref,
6193                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6194                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6195         } else {
6196                 btrfs_set_extent_inline_ref_type(leaf, iref,
6197                                                  BTRFS_TREE_BLOCK_REF_KEY);
6198                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6199         }
6200
6201         btrfs_mark_buffer_dirty(leaf);
6202         btrfs_free_path(path);
6203
6204         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6205         if (ret) { /* -ENOENT, logic error */
6206                 printk(KERN_ERR "btrfs update block group failed for %llu "
6207                        "%llu\n", (unsigned long long)ins->objectid,
6208                        (unsigned long long)ins->offset);
6209                 BUG();
6210         }
6211         return ret;
6212 }
6213
6214 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6215                                      struct btrfs_root *root,
6216                                      u64 root_objectid, u64 owner,
6217                                      u64 offset, struct btrfs_key *ins)
6218 {
6219         int ret;
6220
6221         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6222
6223         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6224                                          ins->offset, 0,
6225                                          root_objectid, owner, offset,
6226                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6227         return ret;
6228 }
6229
6230 /*
6231  * this is used by the tree logging recovery code.  It records that
6232  * an extent has been allocated and makes sure to clear the free
6233  * space cache bits as well
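 *
 * if the block group is still being cached, the part of the range below
 * caching_ctl->progress is removed from the free space cache and the
 * rest is marked as excluded so the caching kthread skips it.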
6234  */
6235 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6236                                    struct btrfs_root *root,
6237                                    u64 root_objectid, u64 owner, u64 offset,
6238                                    struct btrfs_key *ins)
6239 {
6240         int ret;
6241         struct btrfs_block_group_cache *block_group;
6242         struct btrfs_caching_control *caching_ctl;
6243         u64 start = ins->objectid;
6244         u64 num_bytes = ins->offset;
6245
6246         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6247         cache_block_group(block_group, trans, NULL, 0);
6248         caching_ctl = get_caching_control(block_group);
6249
6250         if (!caching_ctl) {
6251                 BUG_ON(!block_group_cache_done(block_group));
6252                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6253                 BUG_ON(ret); /* -ENOMEM */
6254         } else {
6255                 mutex_lock(&caching_ctl->mutex);
6256
6257                 if (start >= caching_ctl->progress) {
6258                         ret = add_excluded_extent(root, start, num_bytes);
6259                         BUG_ON(ret); /* -ENOMEM */
6260                 } else if (start + num_bytes <= caching_ctl->progress) {
6261                         ret = btrfs_remove_free_space(block_group,
6262                                                       start, num_bytes);
6263                         BUG_ON(ret); /* -ENOMEM */
6264                 } else {
6265                         num_bytes = caching_ctl->progress - start;
6266                         ret = btrfs_remove_free_space(block_group,
6267                                                       start, num_bytes);
6268                         BUG_ON(ret); /* -ENOMEM */
6269
6270                         start = caching_ctl->progress;
6271                         num_bytes = ins->objectid + ins->offset -
6272                                     caching_ctl->progress;
6273                         ret = add_excluded_extent(root, start, num_bytes);
6274                         BUG_ON(ret); /* -ENOMEM */
6275                 }
6276
6277                 mutex_unlock(&caching_ctl->mutex);
6278                 put_caching_control(caching_ctl);
6279         }
6280
6281         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6282                                           RESERVE_ALLOC_NO_ACCOUNT);
6283         BUG_ON(ret); /* logic error */
6284         btrfs_put_block_group(block_group);
6285         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6286                                          0, owner, offset, ins, 1);
6287         return ret;
6288 }
6289
6290 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6291                                             struct btrfs_root *root,
6292                                             u64 bytenr, u32 blocksize,
6293                                             int level)
6294 {
6295         struct extent_buffer *buf;
6296
6297         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6298         if (!buf)
6299                 return ERR_PTR(-ENOMEM);
6300         btrfs_set_header_generation(buf, trans->transid);
6301         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6302         btrfs_tree_lock(buf);
6303         clean_tree_block(trans, root, buf);
6304         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6305
6306         btrfs_set_lock_blocking(buf);
6307         btrfs_set_buffer_uptodate(buf);
6308
6309         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6310                 /*
6311                  * we allow two log transactions at a time; use different
6312                  * EXTENT bits to differentiate dirty pages.
6313                  */
6314                 if (root->log_transid % 2 == 0)
6315                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6316                                         buf->start + buf->len - 1, GFP_NOFS);
6317                 else
6318                         set_extent_new(&root->dirty_log_pages, buf->start,
6319                                         buf->start + buf->len - 1, GFP_NOFS);
6320         } else {
6321                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6322                          buf->start + buf->len - 1, GFP_NOFS);
6323         }
6324         trans->blocks_used++;
6325         /* this returns a buffer locked for blocking */
6326         return buf;
6327 }
6328
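/*
 * pick the reservation to charge a new tree block to: try the rsv from
 * get_block_rsv() first, fall back to reserving fresh metadata bytes,
 * and as a last resort dip into the global reserve.
 */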
6329 static struct btrfs_block_rsv *
6330 use_block_rsv(struct btrfs_trans_handle *trans,
6331               struct btrfs_root *root, u32 blocksize)
6332 {
6333         struct btrfs_block_rsv *block_rsv;
6334         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6335         int ret;
6336
6337         block_rsv = get_block_rsv(trans, root);
6338
6339         if (block_rsv->size == 0) {
6340                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6341                                              BTRFS_RESERVE_NO_FLUSH);
6342                 /*
6343                  * If we couldn't reserve metadata bytes, try to use some from
6344                  * the global reserve.
6345                  */
6346                 if (ret && block_rsv != global_rsv) {
6347                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6348                         if (!ret)
6349                                 return global_rsv;
6350                         return ERR_PTR(ret);
6351                 } else if (ret) {
6352                         return ERR_PTR(ret);
6353                 }
6354                 return block_rsv;
6355         }
6356
6357         ret = block_rsv_use_bytes(block_rsv, blocksize);
6358         if (!ret)
6359                 return block_rsv;
6360         if (ret && !block_rsv->failfast) {
6361                 static DEFINE_RATELIMIT_STATE(_rs,
6362                                 DEFAULT_RATELIMIT_INTERVAL,
6363                                 /*DEFAULT_RATELIMIT_BURST*/ 2);
6364                 if (__ratelimit(&_rs))
6365                         WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
6366                              ret);
6367                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6368                                              BTRFS_RESERVE_NO_FLUSH);
6369                 if (!ret) {
6370                         return block_rsv;
6371                 } else if (ret && block_rsv != global_rsv) {
6372                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6373                         if (!ret)
6374                                 return global_rsv;
6375                 }
6376         }
6377
6378         return ERR_PTR(-ENOSPC);
6379 }
6380
6381 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6382                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6383 {
6384         block_rsv_add_bytes(block_rsv, blocksize, 0);
6385         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6386 }
6387
6388 /*
6389  * finds a free extent and does all the dirty work required for allocation.
6390  * returns the key for the extent through ins, and a tree buffer for
6391  * the first block of the extent through buf.
6392  *
6393  * returns the tree buffer or an ERR_PTR on failure.
6394  */
6395 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6396                                         struct btrfs_root *root, u32 blocksize,
6397                                         u64 parent, u64 root_objectid,
6398                                         struct btrfs_disk_key *key, int level,
6399                                         u64 hint, u64 empty_size)
6400 {
6401         struct btrfs_key ins;
6402         struct btrfs_block_rsv *block_rsv;
6403         struct extent_buffer *buf;
6404         u64 flags = 0;
6405         int ret;
6406
6407
6408         block_rsv = use_block_rsv(trans, root, blocksize);
6409         if (IS_ERR(block_rsv))
6410                 return ERR_CAST(block_rsv);
6411
6412         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6413                                    empty_size, hint, &ins, 0);
6414         if (ret) {
6415                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6416                 return ERR_PTR(ret);
6417         }
6418
6419         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6420                                     blocksize, level);
6421         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6422
6423         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6424                 if (parent == 0)
6425                         parent = ins.objectid;
6426                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6427         } else
6428                 BUG_ON(parent > 0);
6429
6430         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6431                 struct btrfs_delayed_extent_op *extent_op;
6432                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6433                 BUG_ON(!extent_op); /* -ENOMEM */
6434                 if (key)
6435                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6436                 else
6437                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6438                 extent_op->flags_to_set = flags;
6439                 extent_op->update_key = 1;
6440                 extent_op->update_flags = 1;
6441                 extent_op->is_data = 0;
6442
6443                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6444                                         ins.objectid,
6445                                         ins.offset, parent, root_objectid,
6446                                         level, BTRFS_ADD_DELAYED_EXTENT,
6447                                         extent_op, 0);
6448                 BUG_ON(ret); /* -ENOMEM */
6449         }
6450         return buf;
6451 }
6452
6453 struct walk_control {
6454         u64 refs[BTRFS_MAX_LEVEL];
6455         u64 flags[BTRFS_MAX_LEVEL];
6456         struct btrfs_key update_progress;
6457         int stage;
6458         int level;
6459         int shared_level;
6460         int update_ref;
6461         int keep_locks;
6462         int reada_slot;
6463         int reada_count;
6464         int for_reloc;
6465 };
6466
6467 #define DROP_REFERENCE  1
6468 #define UPDATE_BACKREF  2
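
/*
 * the two stages of a walk: DROP_REFERENCE walks down dropping our
 * references, and when do_walk_down() finds a shared subtree whose back
 * refs still need updating it switches wc->stage to UPDATE_BACKREF for
 * that subtree (see wc->shared_level).
 */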
6469
6470 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6471                                      struct btrfs_root *root,
6472                                      struct walk_control *wc,
6473                                      struct btrfs_path *path)
6474 {
6475         u64 bytenr;
6476         u64 generation;
6477         u64 refs;
6478         u64 flags;
6479         u32 nritems;
6480         u32 blocksize;
6481         struct btrfs_key key;
6482         struct extent_buffer *eb;
6483         int ret;
6484         int slot;
6485         int nread = 0;
6486
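        /*
         * adapt the readahead window: shrink it to 2/3 (but at least 2)
         * when we re-read below the last readahead slot, and grow it by
         * 3/2 (capped at the number of pointers per block) when moving
         * forward.
         */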
6487         if (path->slots[wc->level] < wc->reada_slot) {
6488                 wc->reada_count = wc->reada_count * 2 / 3;
6489                 wc->reada_count = max(wc->reada_count, 2);
6490         } else {
6491                 wc->reada_count = wc->reada_count * 3 / 2;
6492                 wc->reada_count = min_t(int, wc->reada_count,
6493                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6494         }
6495
6496         eb = path->nodes[wc->level];
6497         nritems = btrfs_header_nritems(eb);
6498         blocksize = btrfs_level_size(root, wc->level - 1);
6499
6500         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6501                 if (nread >= wc->reada_count)
6502                         break;
6503
6504                 cond_resched();
6505                 bytenr = btrfs_node_blockptr(eb, slot);
6506                 generation = btrfs_node_ptr_generation(eb, slot);
6507
6508                 if (slot == path->slots[wc->level])
6509                         goto reada;
6510
6511                 if (wc->stage == UPDATE_BACKREF &&
6512                     generation <= root->root_key.offset)
6513                         continue;
6514
6515                 /* We don't lock the tree block, it's OK to be racy here */
6516                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6517                                                &refs, &flags);
6518                 /* We don't care about errors in readahead. */
6519                 if (ret < 0)
6520                         continue;
6521                 BUG_ON(refs == 0);
6522
6523                 if (wc->stage == DROP_REFERENCE) {
6524                         if (refs == 1)
6525                                 goto reada;
6526
6527                         if (wc->level == 1 &&
6528                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6529                                 continue;
6530                         if (!wc->update_ref ||
6531                             generation <= root->root_key.offset)
6532                                 continue;
6533                         btrfs_node_key_to_cpu(eb, &key, slot);
6534                         ret = btrfs_comp_cpu_keys(&key,
6535                                                   &wc->update_progress);
6536                         if (ret < 0)
6537                                 continue;
6538                 } else {
6539                         if (wc->level == 1 &&
6540                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6541                                 continue;
6542                 }
6543 reada:
6544                 ret = readahead_tree_block(root, bytenr, blocksize,
6545                                            generation);
6546                 if (ret)
6547                         break;
6548                 nread++;
6549         }
6550         wc->reada_slot = slot;
6551 }
6552
6553 /*
6554  * helper to process a tree block while walking down the tree.
6555  *
6556  * when wc->stage == UPDATE_BACKREF, this function updates
6557  * back refs for pointers in the block.
6558  *
6559  * NOTE: return value 1 means we should stop walking down.
6560  */
6561 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6562                                    struct btrfs_root *root,
6563                                    struct btrfs_path *path,
6564                                    struct walk_control *wc, int lookup_info)
6565 {
6566         int level = wc->level;
6567         struct extent_buffer *eb = path->nodes[level];
6568         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6569         int ret;
6570
6571         if (wc->stage == UPDATE_BACKREF &&
6572             btrfs_header_owner(eb) != root->root_key.objectid)
6573                 return 1;
6574
6575         /*
6576          * when the reference count of a tree block is 1, it won't increase
6577          * again. once the full backref flag is set, we never clear it.
6578          */
6579         if (lookup_info &&
6580             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6581              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6582                 BUG_ON(!path->locks[level]);
6583                 ret = btrfs_lookup_extent_info(trans, root,
6584                                                eb->start, eb->len,
6585                                                &wc->refs[level],
6586                                                &wc->flags[level]);
6587                 BUG_ON(ret == -ENOMEM);
6588                 if (ret)
6589                         return ret;
6590                 BUG_ON(wc->refs[level] == 0);
6591         }
6592
6593         if (wc->stage == DROP_REFERENCE) {
6594                 if (wc->refs[level] > 1)
6595                         return 1;
6596
6597                 if (path->locks[level] && !wc->keep_locks) {
6598                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6599                         path->locks[level] = 0;
6600                 }
6601                 return 0;
6602         }
6603
6604         /* wc->stage == UPDATE_BACKREF */
6605         if (!(wc->flags[level] & flag)) {
6606                 BUG_ON(!path->locks[level]);
6607                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6608                 BUG_ON(ret); /* -ENOMEM */
6609                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6610                 BUG_ON(ret); /* -ENOMEM */
6611                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6612                                                   eb->len, flag, 0);
6613                 BUG_ON(ret); /* -ENOMEM */
6614                 wc->flags[level] |= flag;
6615         }
6616
6617         /*
6618          * the block is shared by multiple trees, so it's not good to
6619          * keep the tree lock
6620          */
6621         if (path->locks[level] && level > 0) {
6622                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6623                 path->locks[level] = 0;
6624         }
6625         return 0;
6626 }
6627
6628 /*
6629  * helper to process a tree block pointer.
6630  *
6631  * when wc->stage == DROP_REFERENCE, this function checks
6632  * the reference count of the block pointed to. if the block
6633  * is shared and we need to update back refs for the subtree
6634  * rooted at the block, this function changes wc->stage to
6635  * UPDATE_BACKREF. if the block is shared and there is no
6636  * need to update back refs, this function drops the reference
6637  * to the block.
6638  *
6639  * NOTE: return value 1 means we should stop walking down.
6640  */
6641 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6642                                  struct btrfs_root *root,
6643                                  struct btrfs_path *path,
6644                                  struct walk_control *wc, int *lookup_info)
6645 {
6646         u64 bytenr;
6647         u64 generation;
6648         u64 parent;
6649         u32 blocksize;
6650         struct btrfs_key key;
6651         struct extent_buffer *next;
6652         int level = wc->level;
6653         int reada = 0;
6654         int ret = 0;
6655
6656         generation = btrfs_node_ptr_generation(path->nodes[level],
6657                                                path->slots[level]);
6658         /*
6659          * if the lower level block was created before the snapshot
6660          * was created, we know there is no need to update back refs
6661          * for the subtree
6662          */
6663         if (wc->stage == UPDATE_BACKREF &&
6664             generation <= root->root_key.offset) {
6665                 *lookup_info = 1;
6666                 return 1;
6667         }
6668
6669         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6670         blocksize = btrfs_level_size(root, level - 1);
6671
6672         next = btrfs_find_tree_block(root, bytenr, blocksize);
6673         if (!next) {
6674                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6675                 if (!next)
6676                         return -ENOMEM;
6677                 reada = 1;
6678         }
6679         btrfs_tree_lock(next);
6680         btrfs_set_lock_blocking(next);
6681
6682         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6683                                        &wc->refs[level - 1],
6684                                        &wc->flags[level - 1]);
6685         if (ret < 0) {
6686                 btrfs_tree_unlock(next);
6687                 return ret;
6688         }
6689
6690         BUG_ON(wc->refs[level - 1] == 0);
6691         *lookup_info = 0;
6692
6693         if (wc->stage == DROP_REFERENCE) {
6694                 if (wc->refs[level - 1] > 1) {
6695                         if (level == 1 &&
6696                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6697                                 goto skip;
6698
6699                         if (!wc->update_ref ||
6700                             generation <= root->root_key.offset)
6701                                 goto skip;
6702
6703                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6704                                               path->slots[level]);
6705                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6706                         if (ret < 0)
6707                                 goto skip;
6708
6709                         wc->stage = UPDATE_BACKREF;
6710                         wc->shared_level = level - 1;
6711                 }
6712         } else {
6713                 if (level == 1 &&
6714                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6715                         goto skip;
6716         }
6717
6718         if (!btrfs_buffer_uptodate(next, generation, 0)) {
6719                 btrfs_tree_unlock(next);
6720                 free_extent_buffer(next);
6721                 next = NULL;
6722                 *lookup_info = 1;
6723         }
6724
6725         if (!next) {
6726                 if (reada && level == 1)
6727                         reada_walk_down(trans, root, wc, path);
6728                 next = read_tree_block(root, bytenr, blocksize, generation);
6729                 if (!next)
6730                         return -EIO;
6731                 btrfs_tree_lock(next);
6732                 btrfs_set_lock_blocking(next);
6733         }
6734
6735         level--;
6736         BUG_ON(level != btrfs_header_level(next));
6737         path->nodes[level] = next;
6738         path->slots[level] = 0;
6739         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6740         wc->level = level;
6741         if (wc->level == 1)
6742                 wc->reada_slot = 0;
6743         return 0;
6744 skip:
6745         wc->refs[level - 1] = 0;
6746         wc->flags[level - 1] = 0;
6747         if (wc->stage == DROP_REFERENCE) {
6748                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6749                         parent = path->nodes[level]->start;
6750                 } else {
6751                         BUG_ON(root->root_key.objectid !=
6752                                btrfs_header_owner(path->nodes[level]));
6753                         parent = 0;
6754                 }
6755
6756                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6757                                 root->root_key.objectid, level - 1, 0, 0);
6758                 BUG_ON(ret); /* -ENOMEM */
6759         }
6760         btrfs_tree_unlock(next);
6761         free_extent_buffer(next);
6762         *lookup_info = 1;
6763         return 1;
6764 }
6765
6766 /*
6767  * helper to process a tree block while walking up the tree.
6768  *
6769  * when wc->stage == DROP_REFERENCE, this function drops
6770  * reference count on the block.
6771  *
6772  * when wc->stage == UPDATE_BACKREF, this function changes
6773  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6774  * to UPDATE_BACKREF previously while processing the block.
6775  *
6776  * NOTE: return value 1 means we should stop walking up.
6777  */
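/*
 * A reading of the shared_level bookkeeping (descriptive only):
 * wc->shared_level records where do_walk_down() switched to
 * UPDATE_BACKREF; below that level we only clear the per-level state,
 * and once the walk is back at the shared level the stage flips back
 * to DROP_REFERENCE so the subtree is walked again to drop refs.
 */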
6778 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6779                                  struct btrfs_root *root,
6780                                  struct btrfs_path *path,
6781                                  struct walk_control *wc)
6782 {
6783         int ret;
6784         int level = wc->level;
6785         struct extent_buffer *eb = path->nodes[level];
6786         u64 parent = 0;
6787
6788         if (wc->stage == UPDATE_BACKREF) {
6789                 BUG_ON(wc->shared_level < level);
6790                 if (level < wc->shared_level)
6791                         goto out;
6792
6793                 ret = find_next_key(path, level + 1, &wc->update_progress);
6794                 if (ret > 0)
6795                         wc->update_ref = 0;
6796
6797                 wc->stage = DROP_REFERENCE;
6798                 wc->shared_level = -1;
6799                 path->slots[level] = 0;
6800
6801                 /*
6802                  * check the reference count again if the block isn't locked.
6803                  * we should start walking down the tree again if the
6804                  * reference count is one.
6805                  */
6806                 if (!path->locks[level]) {
6807                         BUG_ON(level == 0);
6808                         btrfs_tree_lock(eb);
6809                         btrfs_set_lock_blocking(eb);
6810                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6811
6812                         ret = btrfs_lookup_extent_info(trans, root,
6813                                                        eb->start, eb->len,
6814                                                        &wc->refs[level],
6815                                                        &wc->flags[level]);
6816                         if (ret < 0) {
6817                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6818                                 return ret;
6819                         }
6820                         BUG_ON(wc->refs[level] == 0);
6821                         if (wc->refs[level] == 1) {
6822                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6823                                 return 1;
6824                         }
6825                 }
6826         }
6827
6828         /* wc->stage == DROP_REFERENCE */
6829         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6830
6831         if (wc->refs[level] == 1) {
6832                 if (level == 0) {
6833                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6834                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6835                                                     wc->for_reloc);
6836                         else
6837                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6838                                                     wc->for_reloc);
6839                         BUG_ON(ret); /* -ENOMEM */
6840                 }
6841                 /* make the block-locked assertion in clean_tree_block happy */
6842                 if (!path->locks[level] &&
6843                     btrfs_header_generation(eb) == trans->transid) {
6844                         btrfs_tree_lock(eb);
6845                         btrfs_set_lock_blocking(eb);
6846                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6847                 }
6848                 clean_tree_block(trans, root, eb);
6849         }
6850
6851         if (eb == root->node) {
6852                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6853                         parent = eb->start;
6854                 else
6855                         BUG_ON(root->root_key.objectid !=
6856                                btrfs_header_owner(eb));
6857         } else {
6858                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6859                         parent = path->nodes[level + 1]->start;
6860                 else
6861                         BUG_ON(root->root_key.objectid !=
6862                                btrfs_header_owner(path->nodes[level + 1]));
6863         }
6864
6865         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6866 out:
6867         wc->refs[level] = 0;
6868         wc->flags[level] = 0;
6869         return 0;
6870 }
6871
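/*
 * walk down the tree, processing each block with walk_down_proc() and
 * descending with do_walk_down() until we reach a leaf, a shared
 * block, or run out of slots at the current level.
 */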
6872 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6873                                    struct btrfs_root *root,
6874                                    struct btrfs_path *path,
6875                                    struct walk_control *wc)
6876 {
6877         int level = wc->level;
6878         int lookup_info = 1;
6879         int ret;
6880
6881         while (level >= 0) {
6882                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6883                 if (ret > 0)
6884                         break;
6885
6886                 if (level == 0)
6887                         break;
6888
6889                 if (path->slots[level] >=
6890                     btrfs_header_nritems(path->nodes[level]))
6891                         break;
6892
6893                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6894                 if (ret > 0) {
6895                         path->slots[level]++;
6896                         continue;
6897                 } else if (ret < 0)
6898                         return ret;
6899                 level = wc->level;
6900         }
6901         return 0;
6902 }
6903
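/*
 * walk back up the tree; returns 0 when there is another slot to
 * descend into and 1 once everything up to max_level has been
 * processed.
 */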
6904 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6905                                  struct btrfs_root *root,
6906                                  struct btrfs_path *path,
6907                                  struct walk_control *wc, int max_level)
6908 {
6909         int level = wc->level;
6910         int ret;
6911
6912         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6913         while (level < max_level && path->nodes[level]) {
6914                 wc->level = level;
6915                 if (path->slots[level] + 1 <
6916                     btrfs_header_nritems(path->nodes[level])) {
6917                         path->slots[level]++;
6918                         return 0;
6919                 } else {
6920                         ret = walk_up_proc(trans, root, path, wc);
6921                         if (ret > 0)
6922                                 return 0;
6923
6924                         if (path->locks[level]) {
6925                                 btrfs_tree_unlock_rw(path->nodes[level],
6926                                                      path->locks[level]);
6927                                 path->locks[level] = 0;
6928                         }
6929                         free_extent_buffer(path->nodes[level]);
6930                         path->nodes[level] = NULL;
6931                         level++;
6932                 }
6933         }
6934         return 1;
6935 }
6936
6937 /*
6938  * drop a subvolume tree.
6939  *
6940  * this function traverses the tree freeing any blocks that are only
6941  * referenced by the tree.
6942  *
6943  * when a shared tree block is found, this function decreases its
6944  * reference count by one. if update_ref is true, this function
6945  * also makes sure backrefs for the shared block and all lower level
6946  * blocks are properly updated.
6947  */
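/*
 * An illustrative call (assumed caller, not part of this file): the
 * cleaner thread drops a dead snapshot with something like
 *
 *	btrfs_drop_snapshot(root, NULL, 0, 0);
 *
 * i.e. no dedicated block reservation, no backref updates for shared
 * blocks, and not on behalf of relocation. root_item->drop_progress
 * lets an interrupted drop resume in a later transaction.
 */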
6948 int btrfs_drop_snapshot(struct btrfs_root *root,
6949                          struct btrfs_block_rsv *block_rsv, int update_ref,
6950                          int for_reloc)
6951 {
6952         struct btrfs_path *path;
6953         struct btrfs_trans_handle *trans;
6954         struct btrfs_root *tree_root = root->fs_info->tree_root;
6955         struct btrfs_root_item *root_item = &root->root_item;
6956         struct walk_control *wc;
6957         struct btrfs_key key;
6958         int err = 0;
6959         int ret;
6960         int level;
6961
6962         path = btrfs_alloc_path();
6963         if (!path) {
6964                 err = -ENOMEM;
6965                 goto out;
6966         }
6967
6968         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6969         if (!wc) {
6970                 btrfs_free_path(path);
6971                 err = -ENOMEM;
6972                 goto out;
6973         }
6974
6975         trans = btrfs_start_transaction(tree_root, 0);
6976         if (IS_ERR(trans)) {
6977                 err = PTR_ERR(trans);
6978                 goto out_free;
6979         }
6980
6981         if (block_rsv)
6982                 trans->block_rsv = block_rsv;
6983
6984         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6985                 level = btrfs_header_level(root->node);
6986                 path->nodes[level] = btrfs_lock_root_node(root);
6987                 btrfs_set_lock_blocking(path->nodes[level]);
6988                 path->slots[level] = 0;
6989                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6990                 memset(&wc->update_progress, 0,
6991                        sizeof(wc->update_progress));
6992         } else {
6993                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6994                 memcpy(&wc->update_progress, &key,
6995                        sizeof(wc->update_progress));
6996
6997                 level = root_item->drop_level;
6998                 BUG_ON(level == 0);
6999                 path->lowest_level = level;
7000                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7001                 path->lowest_level = 0;
7002                 if (ret < 0) {
7003                         err = ret;
7004                         goto out_end_trans;
7005                 }
7006                 WARN_ON(ret > 0);
7007
7008                 /*
7009                  * unlock our path; this is safe because only this
7010                  * function is allowed to delete this snapshot
7011                  */
7012                 btrfs_unlock_up_safe(path, 0);
7013
7014                 level = btrfs_header_level(root->node);
7015                 while (1) {
7016                         btrfs_tree_lock(path->nodes[level]);
7017                         btrfs_set_lock_blocking(path->nodes[level]);
7018
7019                         ret = btrfs_lookup_extent_info(trans, root,
7020                                                 path->nodes[level]->start,
7021                                                 path->nodes[level]->len,
7022                                                 &wc->refs[level],
7023                                                 &wc->flags[level]);
7024                         if (ret < 0) {
7025                                 err = ret;
7026                                 goto out_end_trans;
7027                         }
7028                         BUG_ON(wc->refs[level] == 0);
7029
7030                         if (level == root_item->drop_level)
7031                                 break;
7032
7033                         btrfs_tree_unlock(path->nodes[level]);
7034                         WARN_ON(wc->refs[level] != 1);
7035                         level--;
7036                 }
7037         }
7038
7039         wc->level = level;
7040         wc->shared_level = -1;
7041         wc->stage = DROP_REFERENCE;
7042         wc->update_ref = update_ref;
7043         wc->keep_locks = 0;
7044         wc->for_reloc = for_reloc;
7045         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7046
7047         while (1) {
7048                 ret = walk_down_tree(trans, root, path, wc);
7049                 if (ret < 0) {
7050                         err = ret;
7051                         break;
7052                 }
7053
7054                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7055                 if (ret < 0) {
7056                         err = ret;
7057                         break;
7058                 }
7059
7060                 if (ret > 0) {
7061                         BUG_ON(wc->stage != DROP_REFERENCE);
7062                         break;
7063                 }
7064
7065                 if (wc->stage == DROP_REFERENCE) {
7066                         level = wc->level;
7067                         btrfs_node_key(path->nodes[level],
7068                                        &root_item->drop_progress,
7069                                        path->slots[level]);
7070                         root_item->drop_level = level;
7071                 }
7072
7073                 BUG_ON(wc->level == 0);
7074                 if (btrfs_should_end_transaction(trans, tree_root)) {
7075                         ret = btrfs_update_root(trans, tree_root,
7076                                                 &root->root_key,
7077                                                 root_item);
7078                         if (ret) {
7079                                 btrfs_abort_transaction(trans, tree_root, ret);
7080                                 err = ret;
7081                                 goto out_end_trans;
7082                         }
7083
7084                         btrfs_end_transaction_throttle(trans, tree_root);
7085                         trans = btrfs_start_transaction(tree_root, 0);
7086                         if (IS_ERR(trans)) {
7087                                 err = PTR_ERR(trans);
7088                                 goto out_free;
7089                         }
7090                         if (block_rsv)
7091                                 trans->block_rsv = block_rsv;
7092                 }
7093         }
7094         btrfs_release_path(path);
7095         if (err)
7096                 goto out_end_trans;
7097
7098         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7099         if (ret) {
7100                 btrfs_abort_transaction(trans, tree_root, ret);
7101                 goto out_end_trans;
7102         }
7103
7104         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7105                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7106                                            NULL, NULL);
7107                 if (ret < 0) {
7108                         btrfs_abort_transaction(trans, tree_root, ret);
7109                         err = ret;
7110                         goto out_end_trans;
7111                 } else if (ret > 0) {
7112                         /* if we fail to delete the orphan item this time
7113                          * around, it'll get picked up the next time.
7114                          *
7115                          * The most common failure here is just -ENOENT.
7116                          */
7117                         btrfs_del_orphan_item(trans, tree_root,
7118                                               root->root_key.objectid);
7119                 }
7120         }
7121
7122         if (root->in_radix) {
7123                 btrfs_free_fs_root(tree_root->fs_info, root);
7124         } else {
7125                 free_extent_buffer(root->node);
7126                 free_extent_buffer(root->commit_root);
7127                 kfree(root);
7128         }
7129 out_end_trans:
7130         btrfs_end_transaction_throttle(trans, tree_root);
7131 out_free:
7132         kfree(wc);
7133         btrfs_free_path(path);
7134 out:
7135         if (err)
7136                 btrfs_std_error(root->fs_info, err);
7137         return err;
7138 }
7139
7140 /*
7141  * drop subtree rooted at tree block 'node'.
7142  *
7143  * NOTE: this function will unlock and release tree block 'node'.
7144  * it is only used by the relocation code.
7145  */
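/*
 * A descriptive note on the setup below: walk_control is primed as if
 * do_walk_down() had just descended from 'parent' into 'node', with
 * the parent treated as a fully backref'd block holding a single
 * reference, and keep_locks keeps every level locked for relocation.
 */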
7146 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7147                         struct btrfs_root *root,
7148                         struct extent_buffer *node,
7149                         struct extent_buffer *parent)
7150 {
7151         struct btrfs_path *path;
7152         struct walk_control *wc;
7153         int level;
7154         int parent_level;
7155         int ret = 0;
7156         int wret;
7157
7158         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7159
7160         path = btrfs_alloc_path();
7161         if (!path)
7162                 return -ENOMEM;
7163
7164         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7165         if (!wc) {
7166                 btrfs_free_path(path);
7167                 return -ENOMEM;
7168         }
7169
7170         btrfs_assert_tree_locked(parent);
7171         parent_level = btrfs_header_level(parent);
7172         extent_buffer_get(parent);
7173         path->nodes[parent_level] = parent;
7174         path->slots[parent_level] = btrfs_header_nritems(parent);
7175
7176         btrfs_assert_tree_locked(node);
7177         level = btrfs_header_level(node);
7178         path->nodes[level] = node;
7179         path->slots[level] = 0;
7180         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7181
7182         wc->refs[parent_level] = 1;
7183         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7184         wc->level = level;
7185         wc->shared_level = -1;
7186         wc->stage = DROP_REFERENCE;
7187         wc->update_ref = 0;
7188         wc->keep_locks = 1;
7189         wc->for_reloc = 1;
7190         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7191
7192         while (1) {
7193                 wret = walk_down_tree(trans, root, path, wc);
7194                 if (wret < 0) {
7195                         ret = wret;
7196                         break;
7197                 }
7198
7199                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7200                 if (wret < 0)
7201                         ret = wret;
7202                 if (wret != 0)
7203                         break;
7204         }
7205
7206         kfree(wc);
7207         btrfs_free_path(path);
7208         return ret;
7209 }
7210
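/*
 * work out the profile a block group should be converted to for the
 * current device count; an illustrative reading of the logic below:
 * RAID1/RAID10 fall back to DUP once only one rw device is left, and
 * DUP is switched to RAID1 when more devices are available again.
 */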
7211 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7212 {
7213         u64 num_devices;
7214         u64 stripped;
7215
7216         /*
7217          * if restripe for this chunk_type is on, pick the target profile
7218          * and return; otherwise do the usual balance
7219          */
7220         stripped = get_restripe_target(root->fs_info, flags);
7221         if (stripped)
7222                 return extended_to_chunk(stripped);
7223
7224         /*
7225          * we add in the count of missing devices because we want
7226          * to make sure that any RAID levels on a degraded FS
7227          * continue to be honored.
7228          */
7229         num_devices = root->fs_info->fs_devices->rw_devices +
7230                 root->fs_info->fs_devices->missing_devices;
7231
7232         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7233                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7234                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7235
7236         if (num_devices == 1) {
7237                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7238                 stripped = flags & ~stripped;
7239
7240                 /* turn raid0 into single device chunks */
7241                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7242                         return stripped;
7243
7244                 /* turn mirroring into duplication */
7245                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7246                              BTRFS_BLOCK_GROUP_RAID10))
7247                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7248         } else {
7249                 /* they already had raid on here, just return */
7250                 if (flags & stripped)
7251                         return flags;
7252
7253                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7254                 stripped = flags & ~stripped;
7255
7256                 /* switch duplicated blocks with raid1 */
7257                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7258                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7259
7260                 /* this is drive concat, leave it alone */
7261         }
7262
7263         return flags;
7264 }
7265
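/*
 * try to mark a block group read-only: this only succeeds when the
 * rest of the space_info can absorb the group's unused bytes (plus a
 * small cushion for metadata/system chunks unless 'force' is set);
 * otherwise -ENOSPC is returned.
 */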
7266 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7267 {
7268         struct btrfs_space_info *sinfo = cache->space_info;
7269         u64 num_bytes;
7270         u64 min_allocable_bytes;
7271         int ret = -ENOSPC;
7272
7274         /*
7275          * We need some metadata space and system metadata space for
7276          * allocating chunks in some corner cases, unless we are forced
7277          * to set the group read-only.
7278          */
7279         if ((sinfo->flags &
7280              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7281             !force)
7282                 min_allocable_bytes = 1 * 1024 * 1024;
7283         else
7284                 min_allocable_bytes = 0;
7285
7286         spin_lock(&sinfo->lock);
7287         spin_lock(&cache->lock);
7288
7289         if (cache->ro) {
7290                 ret = 0;
7291                 goto out;
7292         }
7293
7294         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7295                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7296
7297         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7298             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7299             min_allocable_bytes <= sinfo->total_bytes) {
7300                 sinfo->bytes_readonly += num_bytes;
7301                 cache->ro = 1;
7302                 ret = 0;
7303         }
7304 out:
7305         spin_unlock(&cache->lock);
7306         spin_unlock(&sinfo->lock);
7307         return ret;
7308 }
7309
7310 int btrfs_set_block_group_ro(struct btrfs_root *root,
7311                              struct btrfs_block_group_cache *cache)
7313 {
7314         struct btrfs_trans_handle *trans;
7315         u64 alloc_flags;
7316         int ret;
7317
7318         BUG_ON(cache->ro);
7319
7320         trans = btrfs_join_transaction(root);
7321         if (IS_ERR(trans))
7322                 return PTR_ERR(trans);
7323
7324         alloc_flags = update_block_group_flags(root, cache->flags);
7325         if (alloc_flags != cache->flags) {
7326                 ret = do_chunk_alloc(trans, root, alloc_flags,
7327                                      CHUNK_ALLOC_FORCE);
7328                 if (ret < 0)
7329                         goto out;
7330         }
7331
7332         ret = set_block_group_ro(cache, 0);
7333         if (!ret)
7334                 goto out;
7335         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7336         ret = do_chunk_alloc(trans, root, alloc_flags,
7337                              CHUNK_ALLOC_FORCE);
7338         if (ret < 0)
7339                 goto out;
7340         ret = set_block_group_ro(cache, 0);
7341 out:
7342         btrfs_end_transaction(trans, root);
7343         return ret;
7344 }
7345
7346 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7347                             struct btrfs_root *root, u64 type)
7348 {
7349         u64 alloc_flags = get_alloc_profile(root, type);
7350         return do_chunk_alloc(trans, root, alloc_flags,
7351                               CHUNK_ALLOC_FORCE);
7352 }
7353
7354 /*
7355  * helper to account the unused space of all the read-only block groups
7356  * in the list. takes mirrors into account.
7357  */
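/*
 * e.g. a RAID1 or DUP group stores every byte twice, so its unused
 * space counts double on disk (factor == 2 below), while RAID0 and
 * single count once.
 */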
7358 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7359 {
7360         struct btrfs_block_group_cache *block_group;
7361         u64 free_bytes = 0;
7362         int factor;
7363
7364         list_for_each_entry(block_group, groups_list, list) {
7365                 spin_lock(&block_group->lock);
7366
7367                 if (!block_group->ro) {
7368                         spin_unlock(&block_group->lock);
7369                         continue;
7370                 }
7371
7372                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7373                                           BTRFS_BLOCK_GROUP_RAID10 |
7374                                           BTRFS_BLOCK_GROUP_DUP))
7375                         factor = 2;
7376                 else
7377                         factor = 1;
7378
7379                 free_bytes += (block_group->key.offset -
7380                                btrfs_block_group_used(&block_group->item)) *
7381                                factor;
7382
7383                 spin_unlock(&block_group->lock);
7384         }
7385
7386         return free_bytes;
7387 }
7388
7389 /*
7390  * helper to account the unused space of all the read-only block groups
7391  * in the space_info. takes mirrors into account.
7392  */
7393 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7394 {
7395         int i;
7396         u64 free_bytes = 0;
7397
7398         spin_lock(&sinfo->lock);
7399
7400         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7401                 if (!list_empty(&sinfo->block_groups[i]))
7402                         free_bytes += __btrfs_get_ro_block_group_free_space(
7403                                                 &sinfo->block_groups[i]);
7404
7405         spin_unlock(&sinfo->lock);
7406
7407         return free_bytes;
7408 }
7409
7410 void btrfs_set_block_group_rw(struct btrfs_root *root,
7411                               struct btrfs_block_group_cache *cache)
7412 {
7413         struct btrfs_space_info *sinfo = cache->space_info;
7414         u64 num_bytes;
7415
7416         BUG_ON(!cache->ro);
7417
7418         spin_lock(&sinfo->lock);
7419         spin_lock(&cache->lock);
7420         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7421                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7422         sinfo->bytes_readonly -= num_bytes;
7423         cache->ro = 0;
7424         spin_unlock(&cache->lock);
7425         spin_unlock(&sinfo->lock);
7426 }
7427
7428 /*
7429  * checks to see if it's even possible to relocate this block group.
7430  *
7431  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7432  * it's ok to go ahead and try.
7433  */
7434 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7435 {
7436         struct btrfs_block_group_cache *block_group;
7437         struct btrfs_space_info *space_info;
7438         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7439         struct btrfs_device *device;
7440         u64 min_free;
7441         u64 dev_min = 1;
7442         u64 dev_nr = 0;
7443         u64 target;
7444         int index;
7445         int full = 0;
7446         int ret = 0;
7447
7448         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7449
7450         /* odd, couldn't find the block group, leave it alone */
7451         if (!block_group)
7452                 return -1;
7453
7454         min_free = btrfs_block_group_used(&block_group->item);
7455
7456         /* no bytes used, we're good */
7457         if (!min_free)
7458                 goto out;
7459
7460         space_info = block_group->space_info;
7461         spin_lock(&space_info->lock);
7462
7463         full = space_info->full;
7464
7465         /*
7466          * if this is the last block group we have in this space, we can't
7467          * relocate it unless we're able to allocate a new chunk below.
7468          *
7469          * Otherwise, we need to make sure we have room in the space to handle
7470          * all of the extents from this block group.  If we can, we're good
7471          */
7472         if ((space_info->total_bytes != block_group->key.offset) &&
7473             (space_info->bytes_used + space_info->bytes_reserved +
7474              space_info->bytes_pinned + space_info->bytes_readonly +
7475              min_free < space_info->total_bytes)) {
7476                 spin_unlock(&space_info->lock);
7477                 goto out;
7478         }
7479         spin_unlock(&space_info->lock);
7480
7481         /*
7482          * ok we don't have enough space, but maybe we have free space on our
7483          * devices to allocate new chunks for relocation, so loop through our
7484          * alloc devices and guess if we have enough space.  if this block
7485          * group is going to be restriped, run checks against the target
7486          * profile instead of the current one.
7487          */
7488         ret = -1;
7489
7490         /*
7491          * index:
7492          *      0: raid10
7493          *      1: raid1
7494          *      2: dup
7495          *      3: raid0
7496          *      4: single
7497          */
7498         target = get_restripe_target(root->fs_info, block_group->flags);
7499         if (target) {
7500                 index = __get_raid_index(extended_to_chunk(target));
7501         } else {
7502                 /*
7503                  * this is just a balance, so if we were marked as full
7504                  * we know there is no space for a new chunk
7505                  */
7506                 if (full)
7507                         goto out;
7508
7509                 index = get_block_group_index(block_group);
7510         }
7511
7512         if (index == 0) {
7513                 dev_min = 4;
7514                 /* Divide by 2 */
7515                 min_free >>= 1;
7516         } else if (index == 1) {
7517                 dev_min = 2;
7518         } else if (index == 2) {
7519                 /* Multiply by 2 */
7520                 min_free <<= 1;
7521         } else if (index == 3) {
7522                 dev_min = fs_devices->rw_devices;
7523                 do_div(min_free, dev_min);
7524         }
7525
7526         mutex_lock(&root->fs_info->chunk_mutex);
7527         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7528                 u64 dev_offset;
7529
7530                 /*
7531                  * check to make sure we can actually find a chunk with enough
7532                  * space to fit our block group in.
7533                  */
7534                 if (device->total_bytes > device->bytes_used + min_free &&
7535                     !device->is_tgtdev_for_dev_replace) {
7536                         ret = find_free_dev_extent(device, min_free,
7537                                                    &dev_offset, NULL);
7538                         if (!ret)
7539                                 dev_nr++;
7540
7541                         if (dev_nr >= dev_min)
7542                                 break;
7543
7544                         ret = -1;
7545                 }
7546         }
7547         mutex_unlock(&root->fs_info->chunk_mutex);
7548 out:
7549         btrfs_put_block_group(block_group);
7550         return ret;
7551 }
7552
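/*
 * scan the extent tree from *key and position the path at the first
 * BTRFS_BLOCK_GROUP_ITEM_KEY at or beyond key->objectid; returns 0
 * when one is found and > 0 when there are none left.
 */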
7553 static int find_first_block_group(struct btrfs_root *root,
7554                 struct btrfs_path *path, struct btrfs_key *key)
7555 {
7556         int ret = 0;
7557         struct btrfs_key found_key;
7558         struct extent_buffer *leaf;
7559         int slot;
7560
7561         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7562         if (ret < 0)
7563                 goto out;
7564
7565         while (1) {
7566                 slot = path->slots[0];
7567                 leaf = path->nodes[0];
7568                 if (slot >= btrfs_header_nritems(leaf)) {
7569                         ret = btrfs_next_leaf(root, path);
7570                         if (ret == 0)
7571                                 continue;
7572                         if (ret < 0)
7573                                 goto out;
7574                         break;
7575                 }
7576                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7577
7578                 if (found_key.objectid >= key->objectid &&
7579                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7580                         ret = 0;
7581                         goto out;
7582                 }
7583                 path->slots[0]++;
7584         }
7585 out:
7586         return ret;
7587 }
7588
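/*
 * drop the iref that each block group may hold on its free space
 * cache inode; used when tearing down the filesystem, before the
 * block groups themselves are freed.
 */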
7589 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7590 {
7591         struct btrfs_block_group_cache *block_group;
7592         u64 last = 0;
7593
7594         while (1) {
7595                 struct inode *inode;
7596
7597                 block_group = btrfs_lookup_first_block_group(info, last);
7598                 while (block_group) {
7599                         spin_lock(&block_group->lock);
7600                         if (block_group->iref)
7601                                 break;
7602                         spin_unlock(&block_group->lock);
7603                         block_group = next_block_group(info->tree_root,
7604                                                        block_group);
7605                 }
7606                 if (!block_group) {
7607                         if (last == 0)
7608                                 break;
7609                         last = 0;
7610                         continue;
7611                 }
7612
7613                 inode = block_group->inode;
7614                 block_group->iref = 0;
7615                 block_group->inode = NULL;
7616                 spin_unlock(&block_group->lock);
7617                 iput(inode);
7618                 last = block_group->key.objectid + block_group->key.offset;
7619                 btrfs_put_block_group(block_group);
7620         }
7621 }
7622
7623 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7624 {
7625         struct btrfs_block_group_cache *block_group;
7626         struct btrfs_space_info *space_info;
7627         struct btrfs_caching_control *caching_ctl;
7628         struct rb_node *n;
7629
7630         down_write(&info->extent_commit_sem);
7631         while (!list_empty(&info->caching_block_groups)) {
7632                 caching_ctl = list_entry(info->caching_block_groups.next,
7633                                          struct btrfs_caching_control, list);
7634                 list_del(&caching_ctl->list);
7635                 put_caching_control(caching_ctl);
7636         }
7637         up_write(&info->extent_commit_sem);
7638
7639         spin_lock(&info->block_group_cache_lock);
7640         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7641                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7642                                        cache_node);
7643                 rb_erase(&block_group->cache_node,
7644                          &info->block_group_cache_tree);
7645                 spin_unlock(&info->block_group_cache_lock);
7646
7647                 down_write(&block_group->space_info->groups_sem);
7648                 list_del(&block_group->list);
7649                 up_write(&block_group->space_info->groups_sem);
7650
7651                 if (block_group->cached == BTRFS_CACHE_STARTED)
7652                         wait_block_group_cache_done(block_group);
7653
7654                 /*
7655                  * We haven't cached this block group, which means we could
7656                  * possibly have excluded extents on this block group.
7657                  */
7658                 if (block_group->cached == BTRFS_CACHE_NO)
7659                         free_excluded_extents(info->extent_root, block_group);
7660
7661                 btrfs_remove_free_space_cache(block_group);
7662                 btrfs_put_block_group(block_group);
7663
7664                 spin_lock(&info->block_group_cache_lock);
7665         }
7666         spin_unlock(&info->block_group_cache_lock);
7667
7668         /* now that all the block groups are freed, go through and
7669          * free all the space_info structs.  This is only called during
7670          * the final stages of unmount, and so we know nobody is
7671          * using them.  We call synchronize_rcu() once before we start,
7672          * just to be on the safe side.
7673          */
7674         synchronize_rcu();
7675
7676         release_global_block_rsv(info);
7677
7678         while (!list_empty(&info->space_info)) {
7679                 space_info = list_entry(info->space_info.next,
7680                                         struct btrfs_space_info,
7681                                         list);
7682                 if (space_info->bytes_pinned > 0 ||
7683                     space_info->bytes_reserved > 0 ||
7684                     space_info->bytes_may_use > 0) {
7685                         WARN_ON(1);
7686                         dump_space_info(space_info, 0, 0);
7687                 }
7688                 list_del(&space_info->list);
7689                 kfree(space_info);
7690         }
7691         return 0;
7692 }
7693
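/* add the block group to its space_info's list for its raid index */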
7694 static void __link_block_group(struct btrfs_space_info *space_info,
7695                                struct btrfs_block_group_cache *cache)
7696 {
7697         int index = get_block_group_index(cache);
7698
7699         down_write(&space_info->groups_sem);
7700         list_add_tail(&cache->list, &space_info->block_groups[index]);
7701         up_write(&space_info->groups_sem);
7702 }
7703
7704 int btrfs_read_block_groups(struct btrfs_root *root)
7705 {
7706         struct btrfs_path *path;
7707         int ret;
7708         struct btrfs_block_group_cache *cache;
7709         struct btrfs_fs_info *info = root->fs_info;
7710         struct btrfs_space_info *space_info;
7711         struct btrfs_key key;
7712         struct btrfs_key found_key;
7713         struct extent_buffer *leaf;
7714         int need_clear = 0;
7715         u64 cache_gen;
7716
7717         root = info->extent_root;
7718         key.objectid = 0;
7719         key.offset = 0;
7720         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7721         path = btrfs_alloc_path();
7722         if (!path)
7723                 return -ENOMEM;
7724         path->reada = 1;
7725
7726         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7727         if (btrfs_test_opt(root, SPACE_CACHE) &&
7728             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7729                 need_clear = 1;
7730         if (btrfs_test_opt(root, CLEAR_CACHE))
7731                 need_clear = 1;
7732
7733         while (1) {
7734                 ret = find_first_block_group(root, path, &key);
7735                 if (ret > 0)
7736                         break;
7737                 if (ret != 0)
7738                         goto error;
7739                 leaf = path->nodes[0];
7740                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7741                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7742                 if (!cache) {
7743                         ret = -ENOMEM;
7744                         goto error;
7745                 }
7746                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7747                                                 GFP_NOFS);
7748                 if (!cache->free_space_ctl) {
7749                         kfree(cache);
7750                         ret = -ENOMEM;
7751                         goto error;
7752                 }
7753
7754                 atomic_set(&cache->count, 1);
7755                 spin_lock_init(&cache->lock);
7756                 cache->fs_info = info;
7757                 INIT_LIST_HEAD(&cache->list);
7758                 INIT_LIST_HEAD(&cache->cluster_list);
7759
7760                 if (need_clear) {
7761                         /*
7762                          * When we mount with an old space cache, we need to
7763                          * set BTRFS_DC_CLEAR and set the dirty flag.
7764                          *
7765                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7766                          *    truncate the old free space cache inode and
7767                          *    set up a new one.
7768                          * b) Setting the 'dirty' flag makes sure that we flush
7769                          *    the new space cache info onto disk.
7770                          */
7771                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7772                         if (btrfs_test_opt(root, SPACE_CACHE))
7773                                 cache->dirty = 1;
7774                 }
7775
7776                 read_extent_buffer(leaf, &cache->item,
7777                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7778                                    sizeof(cache->item));
7779                 memcpy(&cache->key, &found_key, sizeof(found_key));
7780
7781                 key.objectid = found_key.objectid + found_key.offset;
7782                 btrfs_release_path(path);
7783                 cache->flags = btrfs_block_group_flags(&cache->item);
7784                 cache->sectorsize = root->sectorsize;
7785                 cache->full_stripe_len = btrfs_full_stripe_len(root,
7786                                                &root->fs_info->mapping_tree,
7787                                                found_key.objectid);
7788                 btrfs_init_free_space_ctl(cache);
7789
7790                 /*
7791                  * We need to exclude the super stripes now so that the space
7792                  * info has super bytes accounted for, otherwise we'll think
7793                  * we have more space than we actually do.
7794                  */
7795                 exclude_super_stripes(root, cache);
7796
7797                 /*
7798                  * check for two cases, either we are full, and therefore
7799                  * don't need to bother with the caching work since we won't
7800                  * find any space, or we are empty, and we can just add all
7801                  * the space in and be done with it.  This saves us a lot of
7802                  * time, particularly in the full case.
7803                  */
7804                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7805                         cache->last_byte_to_unpin = (u64)-1;
7806                         cache->cached = BTRFS_CACHE_FINISHED;
7807                         free_excluded_extents(root, cache);
7808                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7809                         cache->last_byte_to_unpin = (u64)-1;
7810                         cache->cached = BTRFS_CACHE_FINISHED;
7811                         add_new_free_space(cache, root->fs_info,
7812                                            found_key.objectid,
7813                                            found_key.objectid +
7814                                            found_key.offset);
7815                         free_excluded_extents(root, cache);
7816                 }
7817
7818                 ret = update_space_info(info, cache->flags, found_key.offset,
7819                                         btrfs_block_group_used(&cache->item),
7820                                         &space_info);
7821                 BUG_ON(ret); /* -ENOMEM */
7822                 cache->space_info = space_info;
7823                 spin_lock(&cache->space_info->lock);
7824                 cache->space_info->bytes_readonly += cache->bytes_super;
7825                 spin_unlock(&cache->space_info->lock);
7826
7827                 __link_block_group(space_info, cache);
7828
7829                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7830                 BUG_ON(ret); /* Logic error */
7831
7832                 set_avail_alloc_bits(root->fs_info, cache->flags);
7833                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7834                         set_block_group_ro(cache, 1);
7835         }
7836
7837         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7838                 if (!(get_alloc_profile(root, space_info->flags) &
7839                       (BTRFS_BLOCK_GROUP_RAID10 |
7840                        BTRFS_BLOCK_GROUP_RAID1 |
7841                        BTRFS_BLOCK_GROUP_RAID5 |
7842                        BTRFS_BLOCK_GROUP_RAID6 |
7843                        BTRFS_BLOCK_GROUP_DUP)))
7844                         continue;
7845                 /*
7846                  * avoid allocating from un-mirrored block groups if there are
7847                  * mirrored block groups.
7848                  */
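                /* block_groups[3] is raid0, block_groups[4] is single */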
7849                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7850                         set_block_group_ro(cache, 1);
7851                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7852                         set_block_group_ro(cache, 1);
7853         }
7854
7855         init_global_block_rsv(info);
7856         ret = 0;
7857 error:
7858         btrfs_free_path(path);
7859         return ret;
7860 }
7861
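/*
 * insert the block group items for any block groups created during
 * this transaction; creation defers the extent tree insertion to this
 * point via the trans->new_bgs list.
 */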
7862 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
7863                                        struct btrfs_root *root)
7864 {
7865         struct btrfs_block_group_cache *block_group, *tmp;
7866         struct btrfs_root *extent_root = root->fs_info->extent_root;
7867         struct btrfs_block_group_item item;
7868         struct btrfs_key key;
7869         int ret = 0;
7870
7871         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
7872                                  new_bg_list) {
7873                 list_del_init(&block_group->new_bg_list);
7874
7875                 if (ret)
7876                         continue;
7877
7878                 spin_lock(&block_group->lock);
7879                 memcpy(&item, &block_group->item, sizeof(item));
7880                 memcpy(&key, &block_group->key, sizeof(key));
7881                 spin_unlock(&block_group->lock);
7882
7883                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
7884                                         sizeof(item));
7885                 if (ret)
7886                         btrfs_abort_transaction(trans, extent_root, ret);
7887         }
7888 }
7889
7890 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7891                            struct btrfs_root *root, u64 bytes_used,
7892                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7893                            u64 size)
7894 {
7895         int ret;
7896         struct btrfs_root *extent_root;
7897         struct btrfs_block_group_cache *cache;
7898
7899         extent_root = root->fs_info->extent_root;
7900
7901         root->fs_info->last_trans_log_full_commit = trans->transid;
7902
7903         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7904         if (!cache)
7905                 return -ENOMEM;
7906         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7907                                         GFP_NOFS);
7908         if (!cache->free_space_ctl) {
7909                 kfree(cache);
7910                 return -ENOMEM;
7911         }
7912
7913         cache->key.objectid = chunk_offset;
7914         cache->key.offset = size;
7915         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7916         cache->sectorsize = root->sectorsize;
7917         cache->fs_info = root->fs_info;
7918         cache->full_stripe_len = btrfs_full_stripe_len(root,
7919                                                &root->fs_info->mapping_tree,
7920                                                chunk_offset);
7921
7922         atomic_set(&cache->count, 1);
7923         spin_lock_init(&cache->lock);
7924         INIT_LIST_HEAD(&cache->list);
7925         INIT_LIST_HEAD(&cache->cluster_list);
7926         INIT_LIST_HEAD(&cache->new_bg_list);
7927
7928         btrfs_init_free_space_ctl(cache);
7929
7930         btrfs_set_block_group_used(&cache->item, bytes_used);
7931         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7932         cache->flags = type;
7933         btrfs_set_block_group_flags(&cache->item, type);
7934
7935         cache->last_byte_to_unpin = (u64)-1;
7936         cache->cached = BTRFS_CACHE_FINISHED;
7937         exclude_super_stripes(root, cache);
7938
7939         add_new_free_space(cache, root->fs_info, chunk_offset,
7940                            chunk_offset + size);
7941
7942         free_excluded_extents(root, cache);
7943
7944         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7945                                 &cache->space_info);
7946         BUG_ON(ret); /* -ENOMEM */
7947         update_global_block_rsv(root->fs_info);
7948
7949         spin_lock(&cache->space_info->lock);
7950         cache->space_info->bytes_readonly += cache->bytes_super;
7951         spin_unlock(&cache->space_info->lock);
7952
7953         __link_block_group(cache->space_info, cache);
7954
7955         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7956         BUG_ON(ret); /* Logic error */
7957
7958         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
7959
7960         set_avail_alloc_bits(extent_root->fs_info, type);
7961
7962         return 0;
7963 }
7964
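/*
 * roughly the inverse of set_avail_alloc_bits(): drop the extended
 * profile bits in 'flags' from the per-type avail_*_alloc_bits masks.
 */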
7965 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7966 {
7967         u64 extra_flags = chunk_to_extended(flags) &
7968                                 BTRFS_EXTENDED_PROFILE_MASK;
7969
7970         if (flags & BTRFS_BLOCK_GROUP_DATA)
7971                 fs_info->avail_data_alloc_bits &= ~extra_flags;
7972         if (flags & BTRFS_BLOCK_GROUP_METADATA)
7973                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7974         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7975                 fs_info->avail_system_alloc_bits &= ~extra_flags;
7976 }
7977
7978 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7979                              struct btrfs_root *root, u64 group_start)
7980 {
7981         struct btrfs_path *path;
7982         struct btrfs_block_group_cache *block_group;
7983         struct btrfs_free_cluster *cluster;
7984         struct btrfs_root *tree_root = root->fs_info->tree_root;
7985         struct btrfs_key key;
7986         struct inode *inode;
7987         int ret;
7988         int index;
7989         int factor;
7990
7991         root = root->fs_info->extent_root;
7992
7993         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7994         BUG_ON(!block_group);
7995         BUG_ON(!block_group->ro);
7996
7997         /*
7998          * Free the reserved super bytes from this block group before
7999          * removing it.
8000          */
8001         free_excluded_extents(root, block_group);
8002
8003         memcpy(&key, &block_group->key, sizeof(key));
8004         index = get_block_group_index(block_group);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        /* make sure this block group isn't part of an allocation cluster */
        cluster = &root->fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * make sure this block group isn't part of a metadata
         * allocation cluster
         */
        cluster = &root->fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        inode = lookup_free_space_inode(tree_root, block_group, path);
        if (!IS_ERR(inode)) {
                ret = btrfs_orphan_add(trans, inode);
                if (ret) {
                        btrfs_add_delayed_iput(inode);
                        goto out;
                }
                clear_nlink(inode);
                /* One for the block group's ref */
                spin_lock(&block_group->lock);
                if (block_group->iref) {
                        block_group->iref = 0;
                        block_group->inode = NULL;
                        spin_unlock(&block_group->lock);
                        iput(inode);
                } else {
                        spin_unlock(&block_group->lock);
                }
                /* One for our lookup ref */
                btrfs_add_delayed_iput(inode);
        }

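        /* delete the free space cache item for this group from the tree root */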
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = block_group->key.objectid;
        key.type = 0;

        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
                        goto out;
                btrfs_release_path(path);
        }

        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
        spin_unlock(&root->fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so others can check whether the
         * group is still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
        if (list_empty(&block_group->space_info->block_groups[index]))
                clear_avail_alloc_bits(root->fs_info, block_group->flags);
        up_write(&block_group->space_info->groups_sem);

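        /*
         * If caching is still in progress for this group, wait for it to
         * finish before tearing down the free space cache.
         */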
        if (block_group->cached == BTRFS_CACHE_STARTED)
                wait_block_group_cache_done(block_group);

        btrfs_remove_free_space_cache(block_group);

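        /*
         * Pull this group's bytes out of the space_info counters; the
         * group is read-only, so bytes_readonly shrinks by the same
         * amount.
         */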
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;
        spin_unlock(&block_group->space_info->lock);

        memcpy(&key, &block_group->key, sizeof(key));

        btrfs_clear_space_info_full(root->fs_info);

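        /* drop two refs: our lookup above and the one the rbtree held */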
        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);

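        /*
         * Finally delete the block group item itself from the extent
         * tree.  ret > 0 means the item was not found, which should
         * never happen, so treat it as -EIO.
         */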
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}

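/*
 * Create the basic space_info entries (system, plus either mixed or
 * separate data and metadata) so they exist before any block groups
 * are added.
 */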
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *space_info;
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return 1;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        }
out:
        return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
        return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 *actual_bytes)
{
        return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}

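/*
 * Trim (discard) free space in the given range; this is the backend of
 * the FITRIM ioctl.  On return, range->len holds the number of bytes
 * actually trimmed.
 */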
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * When trimming the whole FS, the first block group may start at
         * a non-zero offset, so look up the first group at or after
         * range->start instead of the group containing it.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

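        /* walk all block groups overlapping the range and trim each one */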
        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                                cache->key.objectid + cache->key.offset);

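                /*
                 * Skip this group if its overlap with the range is
                 * shorter than minlen; otherwise make sure its free
                 * space is fully cached before trimming.
                 */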
                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, NULL, root, 0);
                                if (!ret)
                                        wait_block_group_cache_done(cache);
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info->tree_root, cache);
        }

        range->len = trimmed;
        return ret;
}