linux-imx.git: fs/btrfs/extent-tree.c (commit b9526f74904903f74662ba22c588d81684842556)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
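
/*
 * Illustrative sketch (not part of the original file): a normal write path
 * would ask for a chunk with CHUNK_ALLOC_NO_FORCE and let do_chunk_alloc()
 * (declared below) decide whether one is really needed, while the cluster
 * setup code can pass CHUNK_ALLOC_LIMITED to pre-seed a small pool:
 *
 *      ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_NO_FORCE);
 *      if (ret < 0 && ret != -ENOSPC)
 *              goto abort;
 */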

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
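
/*
 * Illustrative sketch (not part of the original file): an allocator that
 * does its ENOSPC accounting elsewhere would reserve space in a block
 * group with RESERVE_ALLOC_NO_ACCOUNT and give it back with RESERVE_FREE:
 *
 *      ret = btrfs_update_reserved_bytes(cache, num_bytes,
 *                                        RESERVE_ALLOC_NO_ACCOUNT);
 *      ...
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */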

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
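
/*
 * Usage note (illustrative, not part of the original file): every lookup
 * helper below returns its block group with an extra reference held, so
 * callers are expected to pair the lookup with btrfs_put_block_group():
 *
 *      cache = btrfs_lookup_block_group(fs_info, bytenr);
 *      if (cache) {
 *              ...
 *              btrfs_put_block_group(cache);
 *      }
 */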

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret); /* -ENOMEM */
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret); /* -ENOMEM */

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret); /* -ENOMEM */
                }

                kfree(logical);
        }
        return 0;
}
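
/*
 * Note (illustrative, not part of the original file): the ranges excluded
 * above are marked EXTENT_UPTODATE in both freed_extents trees, and the
 * caching code below treats EXTENT_DIRTY | EXTENT_UPTODATE ranges as
 * unusable, so the superblock mirrors are never handed out as free space.
 */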

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for anything that can't be
 * used yet: that free space will only be released once the transaction
 * commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
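
/*
 * Worked example (illustrative, not part of the original file): caching a
 * range [0, 100) while [30, 50] is pinned adds [0, 30) inside the loop,
 * then [51, 100) in the tail block, so only the pinned bytes stay out of
 * the free space cache until the transaction commits.
 */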

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish; otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
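
/*
 * Illustrative sketch (not part of the original file): a caller that just
 * wants the on-disk free space cache loaded passes load_cache_only == 1;
 * passing 0 also queues caching_thread() to scan the extent tree:
 *
 *      ret = cache_block_group(block_group, trans, root, 0);
 *      if (!block_group_cache_done(block_group))
 *              ...wait on caching_ctl->wait or pick another group...
 */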

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
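
/*
 * Illustrative sketch (not part of the original file): with trans == NULL
 * this reads the last committed state only; with a transaction handle the
 * queued delayed ref modifications are folded in, as described above:
 *
 *      u64 refs, flags;
 *
 *      ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
 *                                     &refs, &flags);
 *      if (!ret && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
 *              ...block uses full back refs...
 */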

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used. Their major
 * shortcoming is overhead: every time a tree block gets COWed, we have to
 * update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree-related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts. The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs,
 * and the key offset has different meanings for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used;
 * the fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
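
/*
 * Example (illustrative, not part of the original file): a data extent at
 * bytenr 4096 referenced once from inode 257 at file offset 0 in root 5
 * would carry the implicit back ref key
 *
 *      (4096, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * whereas the same extent referenced through a shared leaf at bytenr 8192
 * would use the full back ref key
 *
 *      (4096, BTRFS_SHARED_DATA_REF_KEY, 8192)
 */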

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
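
/*
 * Note (illustrative, not part of the original file): two crc32c sums are
 * combined into a single value, with the root objectid hashed separately
 * from owner/offset. Collisions are still possible, which is why
 * insert_extent_data_ref() below retries with key.offset++ on -EEXIST.
 */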

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}
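
/*
 * Illustrative sketch (not part of the original file): a caller updating a
 * data ref might probe with the lookup above and fall back to inserting a
 * new ref item when it returns -ENOENT:
 *
 *      ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
 *                                   root_objectid, owner, offset);
 *      if (ret == -ENOENT) {
 *              btrfs_release_path(path);
 *              ret = insert_extent_data_ref(trans, root, path, bytenr,
 *                                           parent, root_objectid, owner,
 *                                           offset, refs_to_add);
 *      }
 */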

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}
1376
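/*
 * Map (parent, owner) to a back ref key type: owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree blocks, everything else is file
 * data, and a non-zero parent selects the shared variant.
 */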
1377 static inline int extent_ref_type(u64 parent, u64 owner)
1378 {
1379         int type;
1380         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1381                 if (parent > 0)
1382                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1383                 else
1384                         type = BTRFS_TREE_BLOCK_REF_KEY;
1385         } else {
1386                 if (parent > 0)
1387                         type = BTRFS_SHARED_DATA_REF_KEY;
1388                 else
1389                         type = BTRFS_EXTENT_DATA_REF_KEY;
1390         }
1391         return type;
1392 }
1393
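/*
 * Starting at @level, find the lowest level in the path that still has
 * a next slot and return the key stored there.  Returns 1 if the path
 * points at the last item of the tree.
 */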
1394 static int find_next_key(struct btrfs_path *path, int level,
1395                          struct btrfs_key *key)
1397 {
1398         for (; level < BTRFS_MAX_LEVEL; level++) {
1399                 if (!path->nodes[level])
1400                         break;
1401                 if (path->slots[level] + 1 >=
1402                     btrfs_header_nritems(path->nodes[level]))
1403                         continue;
1404                 if (level == 0)
1405                         btrfs_item_key_to_cpu(path->nodes[level], key,
1406                                               path->slots[level] + 1);
1407                 else
1408                         btrfs_node_key_to_cpu(path->nodes[level], key,
1409                                               path->slots[level] + 1);
1410                 return 0;
1411         }
1412         return 1;
1413 }
1414
1415 /*
1416  * Look for an inline back ref.  If the back ref is found, *ref_ret is
1417  * set to the address of the inline back ref, and 0 is returned.
1418  *
1419  * If the back ref isn't found, *ref_ret is set to the address where it
1420  * should be inserted, and -ENOENT is returned.
1421  *
1422  * If insert is true and there are too many inline back refs, the path
1423  * points to the extent item, and -EAGAIN is returned.
1424  *
1425  * NOTE: inline back refs are ordered in the same way that back ref
1426  *       items in the tree are ordered.
1427  */
1428 static noinline_for_stack
1429 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1430                                  struct btrfs_root *root,
1431                                  struct btrfs_path *path,
1432                                  struct btrfs_extent_inline_ref **ref_ret,
1433                                  u64 bytenr, u64 num_bytes,
1434                                  u64 parent, u64 root_objectid,
1435                                  u64 owner, u64 offset, int insert)
1436 {
1437         struct btrfs_key key;
1438         struct extent_buffer *leaf;
1439         struct btrfs_extent_item *ei;
1440         struct btrfs_extent_inline_ref *iref;
1441         u64 flags;
1442         u64 item_size;
1443         unsigned long ptr;
1444         unsigned long end;
1445         int extra_size;
1446         int type;
1447         int want;
1448         int ret;
1449         int err = 0;
1450
1451         key.objectid = bytenr;
1452         key.type = BTRFS_EXTENT_ITEM_KEY;
1453         key.offset = num_bytes;
1454
1455         want = extent_ref_type(parent, owner);
1456         if (insert) {
1457                 extra_size = btrfs_extent_inline_ref_size(want);
1458                 path->keep_locks = 1;
1459         } else
1460                 extra_size = -1;
1461         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1462         if (ret < 0) {
1463                 err = ret;
1464                 goto out;
1465         }
1466         if (ret && !insert) {
1467                 err = -ENOENT;
1468                 goto out;
1469         }
1470         BUG_ON(ret); /* Corruption */
1471
1472         leaf = path->nodes[0];
1473         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1474 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1475         if (item_size < sizeof(*ei)) {
1476                 if (!insert) {
1477                         err = -ENOENT;
1478                         goto out;
1479                 }
1480                 ret = convert_extent_item_v0(trans, root, path, owner,
1481                                              extra_size);
1482                 if (ret < 0) {
1483                         err = ret;
1484                         goto out;
1485                 }
1486                 leaf = path->nodes[0];
1487                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1488         }
1489 #endif
1490         BUG_ON(item_size < sizeof(*ei));
1491
1492         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1493         flags = btrfs_extent_flags(leaf, ei);
1494
1495         ptr = (unsigned long)(ei + 1);
1496         end = (unsigned long)ei + item_size;
1497
1498         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1499                 ptr += sizeof(struct btrfs_tree_block_info);
1500                 BUG_ON(ptr > end);
1501         } else {
1502                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1503         }
1504
1505         err = -ENOENT;
1506         while (1) {
1507                 if (ptr >= end) {
1508                         WARN_ON(ptr > end);
1509                         break;
1510                 }
1511                 iref = (struct btrfs_extent_inline_ref *)ptr;
1512                 type = btrfs_extent_inline_ref_type(leaf, iref);
1513                 if (want < type)
1514                         break;
1515                 if (want > type) {
1516                         ptr += btrfs_extent_inline_ref_size(type);
1517                         continue;
1518                 }
1519
1520                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1521                         struct btrfs_extent_data_ref *dref;
1522                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1523                         if (match_extent_data_ref(leaf, dref, root_objectid,
1524                                                   owner, offset)) {
1525                                 err = 0;
1526                                 break;
1527                         }
1528                         if (hash_extent_data_ref_item(leaf, dref) <
1529                             hash_extent_data_ref(root_objectid, owner, offset))
1530                                 break;
1531                 } else {
1532                         u64 ref_offset;
1533                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1534                         if (parent > 0) {
1535                                 if (parent == ref_offset) {
1536                                         err = 0;
1537                                         break;
1538                                 }
1539                                 if (ref_offset < parent)
1540                                         break;
1541                         } else {
1542                                 if (root_objectid == ref_offset) {
1543                                         err = 0;
1544                                         break;
1545                                 }
1546                                 if (ref_offset < root_objectid)
1547                                         break;
1548                         }
1549                 }
1550                 ptr += btrfs_extent_inline_ref_size(type);
1551         }
1552         if (err == -ENOENT && insert) {
1553                 if (item_size + extra_size >=
1554                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1555                         err = -EAGAIN;
1556                         goto out;
1557                 }
1558                 /*
1559                  * To add a new inline back ref, we have to make sure
1560                  * there is no corresponding back ref item.
1561                  * For simplicity, we just do not add a new inline back
1562                  * ref if there is any kind of item for this block.
1563                  */
1564                 if (find_next_key(path, 0, &key) == 0 &&
1565                     key.objectid == bytenr &&
1566                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1567                         err = -EAGAIN;
1568                         goto out;
1569                 }
1570         }
1571         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1572 out:
1573         if (insert) {
1574                 path->keep_locks = 0;
1575                 btrfs_unlock_up_safe(path, 1);
1576         }
1577         return err;
1578 }
1579
1580 /*
1581  * helper to add a new inline back ref
1582  */
1583 static noinline_for_stack
1584 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1585                                  struct btrfs_root *root,
1586                                  struct btrfs_path *path,
1587                                  struct btrfs_extent_inline_ref *iref,
1588                                  u64 parent, u64 root_objectid,
1589                                  u64 owner, u64 offset, int refs_to_add,
1590                                  struct btrfs_delayed_extent_op *extent_op)
1591 {
1592         struct extent_buffer *leaf;
1593         struct btrfs_extent_item *ei;
1594         unsigned long ptr;
1595         unsigned long end;
1596         unsigned long item_offset;
1597         u64 refs;
1598         int size;
1599         int type;
1600
1601         leaf = path->nodes[0];
1602         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1603         item_offset = (unsigned long)iref - (unsigned long)ei;
1604
1605         type = extent_ref_type(parent, owner);
1606         size = btrfs_extent_inline_ref_size(type);
1607
1608         btrfs_extend_item(trans, root, path, size);
1609
1610         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1611         refs = btrfs_extent_refs(leaf, ei);
1612         refs += refs_to_add;
1613         btrfs_set_extent_refs(leaf, ei, refs);
1614         if (extent_op)
1615                 __run_delayed_extent_op(extent_op, leaf, ei);
1616
1617         ptr = (unsigned long)ei + item_offset;
1618         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1619         if (ptr < end - size)
1620                 memmove_extent_buffer(leaf, ptr + size, ptr,
1621                                       end - size - ptr);
1622
1623         iref = (struct btrfs_extent_inline_ref *)ptr;
1624         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1625         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1626                 struct btrfs_extent_data_ref *dref;
1627                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1628                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1629                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1630                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1631                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1632         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1633                 struct btrfs_shared_data_ref *sref;
1634                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1635                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1636                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1637         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1638                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1639         } else {
1640                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1641         }
1642         btrfs_mark_buffer_dirty(leaf);
1643 }
1644
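/*
 * Look up a back ref for the given extent, trying the inline form
 * first and falling back to a separate keyed back ref item.  *ref_ret
 * is only set when an inline ref is found.
 */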
1645 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1646                                  struct btrfs_root *root,
1647                                  struct btrfs_path *path,
1648                                  struct btrfs_extent_inline_ref **ref_ret,
1649                                  u64 bytenr, u64 num_bytes, u64 parent,
1650                                  u64 root_objectid, u64 owner, u64 offset)
1651 {
1652         int ret;
1653
1654         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1655                                            bytenr, num_bytes, parent,
1656                                            root_objectid, owner, offset, 0);
1657         if (ret != -ENOENT)
1658                 return ret;
1659
1660         btrfs_release_path(path);
1661         *ref_ret = NULL;
1662
1663         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1664                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1665                                             root_objectid);
1666         } else {
1667                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1668                                              root_objectid, owner, offset);
1669         }
1670         return ret;
1671 }
1672
1673 /*
1674  * helper to update/remove an inline back ref
1675  */
1676 static noinline_for_stack
1677 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1678                                   struct btrfs_root *root,
1679                                   struct btrfs_path *path,
1680                                   struct btrfs_extent_inline_ref *iref,
1681                                   int refs_to_mod,
1682                                   struct btrfs_delayed_extent_op *extent_op)
1683 {
1684         struct extent_buffer *leaf;
1685         struct btrfs_extent_item *ei;
1686         struct btrfs_extent_data_ref *dref = NULL;
1687         struct btrfs_shared_data_ref *sref = NULL;
1688         unsigned long ptr;
1689         unsigned long end;
1690         u32 item_size;
1691         int size;
1692         int type;
1693         u64 refs;
1694
1695         leaf = path->nodes[0];
1696         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1697         refs = btrfs_extent_refs(leaf, ei);
1698         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1699         refs += refs_to_mod;
1700         btrfs_set_extent_refs(leaf, ei, refs);
1701         if (extent_op)
1702                 __run_delayed_extent_op(extent_op, leaf, ei);
1703
1704         type = btrfs_extent_inline_ref_type(leaf, iref);
1705
1706         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1707                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1708                 refs = btrfs_extent_data_ref_count(leaf, dref);
1709         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1710                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1711                 refs = btrfs_shared_data_ref_count(leaf, sref);
1712         } else {
1713                 refs = 1;
1714                 BUG_ON(refs_to_mod != -1);
1715         }
1716
1717         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1718         refs += refs_to_mod;
1719
1720         if (refs > 0) {
1721                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1722                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1723                 else
1724                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1725         } else {
1726                 size = btrfs_extent_inline_ref_size(type);
1727                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1728                 ptr = (unsigned long)iref;
1729                 end = (unsigned long)ei + item_size;
1730                 if (ptr + size < end)
1731                         memmove_extent_buffer(leaf, ptr, ptr + size,
1732                                               end - ptr - size);
1733                 item_size -= size;
1734                 btrfs_truncate_item(trans, root, path, item_size, 1);
1735         }
1736         btrfs_mark_buffer_dirty(leaf);
1737 }
1738
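/*
 * Add @refs_to_add references as an inline back ref, bumping the count
 * in place if the ref already exists.  Passes -EAGAIN through from the
 * lookup when the ref must be inserted as a keyed item instead.
 */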
1739 static noinline_for_stack
1740 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1741                                  struct btrfs_root *root,
1742                                  struct btrfs_path *path,
1743                                  u64 bytenr, u64 num_bytes, u64 parent,
1744                                  u64 root_objectid, u64 owner,
1745                                  u64 offset, int refs_to_add,
1746                                  struct btrfs_delayed_extent_op *extent_op)
1747 {
1748         struct btrfs_extent_inline_ref *iref;
1749         int ret;
1750
1751         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1752                                            bytenr, num_bytes, parent,
1753                                            root_objectid, owner, offset, 1);
1754         if (ret == 0) {
1755                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1756                 update_inline_extent_backref(trans, root, path, iref,
1757                                              refs_to_add, extent_op);
1758         } else if (ret == -ENOENT) {
1759                 setup_inline_extent_backref(trans, root, path, iref, parent,
1760                                             root_objectid, owner, offset,
1761                                             refs_to_add, extent_op);
1762                 ret = 0;
1763         }
1764         return ret;
1765 }
1766
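/*
 * Insert a keyed (non-inline) back ref item.  Tree blocks always add
 * exactly one reference at a time; data extents may add several.
 */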
1767 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1768                                  struct btrfs_root *root,
1769                                  struct btrfs_path *path,
1770                                  u64 bytenr, u64 parent, u64 root_objectid,
1771                                  u64 owner, u64 offset, int refs_to_add)
1772 {
1773         int ret;
1774         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1775                 BUG_ON(refs_to_add != 1);
1776                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1777                                             parent, root_objectid);
1778         } else {
1779                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1780                                              parent, root_objectid,
1781                                              owner, offset, refs_to_add);
1782         }
1783         return ret;
1784 }
1785
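/*
 * Drop @refs_to_drop references: from the inline ref if @iref is set,
 * from the keyed data ref item otherwise, or by deleting the keyed
 * tree block ref item, which carries no count.
 */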
1786 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1787                                  struct btrfs_root *root,
1788                                  struct btrfs_path *path,
1789                                  struct btrfs_extent_inline_ref *iref,
1790                                  int refs_to_drop, int is_data)
1791 {
1792         int ret = 0;
1793
1794         BUG_ON(!is_data && refs_to_drop != 1);
1795         if (iref) {
1796                 update_inline_extent_backref(trans, root, path, iref,
1797                                              -refs_to_drop, NULL);
1798         } else if (is_data) {
1799                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1800         } else {
1801                 ret = btrfs_del_item(trans, root, path);
1802         }
1803         return ret;
1804 }
1805
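/* start and len are in bytes; blkdev_issue_discard() takes 512-byte sectors */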
1806 static int btrfs_issue_discard(struct block_device *bdev,
1807                                 u64 start, u64 len)
1808 {
1809         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1810 }
1811
1812 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1813                                 u64 num_bytes, u64 *actual_bytes)
1814 {
1815         int ret;
1816         u64 discarded_bytes = 0;
1817         struct btrfs_bio *bbio = NULL;
1818
1820         /* Tell the block device(s) that the sectors can be discarded */
1821         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1822                               bytenr, &num_bytes, &bbio, 0);
1823         /* Error condition is -ENOMEM */
1824         if (!ret) {
1825                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1826                 int i;
1827
1829                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1830                         if (!stripe->dev->can_discard)
1831                                 continue;
1832
1833                         ret = btrfs_issue_discard(stripe->dev->bdev,
1834                                                   stripe->physical,
1835                                                   stripe->length);
1836                         if (!ret)
1837                                 discarded_bytes += stripe->length;
1838                         else if (ret != -EOPNOTSUPP)
1839                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1840
1841                         /*
1842                          * Just in case we get back EOPNOTSUPP for some reason,
1843                          * ignore the return value so we don't screw up
1844                          * people calling discard_extent.
1845                          */
1846                         ret = 0;
1847                 }
1848                 kfree(bbio);
1849         }
1850
1851         if (actual_bytes)
1852                 *actual_bytes = discarded_bytes;
1853
1855         return ret;
1856 }
1857
1858 /* Can return -ENOMEM */
1859 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1860                          struct btrfs_root *root,
1861                          u64 bytenr, u64 num_bytes, u64 parent,
1862                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1863 {
1864         int ret;
1865         struct btrfs_fs_info *fs_info = root->fs_info;
1866
1867         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1868                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1869
1870         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1871                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1872                                         num_bytes,
1873                                         parent, root_objectid, (int)owner,
1874                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1875         } else {
1876                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1877                                         num_bytes,
1878                                         parent, root_objectid, owner, offset,
1879                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1880         }
1881         return ret;
1882 }
1883
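/*
 * Add @refs_to_add references to an existing extent.  First try to
 * update or insert an inline back ref; if that returns -EAGAIN, bump
 * the ref count on the extent item here and insert the back ref as a
 * separate keyed item.
 */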
1884 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1885                                   struct btrfs_root *root,
1886                                   u64 bytenr, u64 num_bytes,
1887                                   u64 parent, u64 root_objectid,
1888                                   u64 owner, u64 offset, int refs_to_add,
1889                                   struct btrfs_delayed_extent_op *extent_op)
1890 {
1891         struct btrfs_path *path;
1892         struct extent_buffer *leaf;
1893         struct btrfs_extent_item *item;
1894         u64 refs;
1895         int ret;
1896         int err = 0;
1897
1898         path = btrfs_alloc_path();
1899         if (!path)
1900                 return -ENOMEM;
1901
1902         path->reada = 1;
1903         path->leave_spinning = 1;
1904         /* this will set up the path even if it fails to insert the back ref */
1905         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1906                                            path, bytenr, num_bytes, parent,
1907                                            root_objectid, owner, offset,
1908                                            refs_to_add, extent_op);
1909         if (ret == 0)
1910                 goto out;
1911
1912         if (ret != -EAGAIN) {
1913                 err = ret;
1914                 goto out;
1915         }
1916
1917         leaf = path->nodes[0];
1918         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1919         refs = btrfs_extent_refs(leaf, item);
1920         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1921         if (extent_op)
1922                 __run_delayed_extent_op(extent_op, leaf, item);
1923
1924         btrfs_mark_buffer_dirty(leaf);
1925         btrfs_release_path(path);
1926
1927         path->reada = 1;
1928         path->leave_spinning = 1;
1929
1930         /* now insert the actual backref */
1931         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1932                                     path, bytenr, parent, root_objectid,
1933                                     owner, offset, refs_to_add);
1934         if (ret)
1935                 btrfs_abort_transaction(trans, root, ret);
1936 out:
1937         btrfs_free_path(path);
1938         return err;
1939 }
1940
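/*
 * Process one delayed ref node for a data extent: insert the reserved
 * extent, add references to it, or free them, depending on the action.
 */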
1941 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1942                                 struct btrfs_root *root,
1943                                 struct btrfs_delayed_ref_node *node,
1944                                 struct btrfs_delayed_extent_op *extent_op,
1945                                 int insert_reserved)
1946 {
1947         int ret = 0;
1948         struct btrfs_delayed_data_ref *ref;
1949         struct btrfs_key ins;
1950         u64 parent = 0;
1951         u64 ref_root = 0;
1952         u64 flags = 0;
1953
1954         ins.objectid = node->bytenr;
1955         ins.offset = node->num_bytes;
1956         ins.type = BTRFS_EXTENT_ITEM_KEY;
1957
1958         ref = btrfs_delayed_node_to_data_ref(node);
1959         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1960                 parent = ref->parent;
1961         else
1962                 ref_root = ref->root;
1963
1964         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1965                 if (extent_op) {
1966                         BUG_ON(extent_op->update_key);
1967                         flags |= extent_op->flags_to_set;
1968                 }
1969                 ret = alloc_reserved_file_extent(trans, root,
1970                                                  parent, ref_root, flags,
1971                                                  ref->objectid, ref->offset,
1972                                                  &ins, node->ref_mod);
1973         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1974                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1975                                              node->num_bytes, parent,
1976                                              ref_root, ref->objectid,
1977                                              ref->offset, node->ref_mod,
1978                                              extent_op);
1979         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1980                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1981                                           node->num_bytes, parent,
1982                                           ref_root, ref->objectid,
1983                                           ref->offset, node->ref_mod,
1984                                           extent_op);
1985         } else {
1986                 BUG();
1987         }
1988         return ret;
1989 }
1990
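/*
 * Apply a delayed extent op to an extent item in place: set extent
 * flags and/or the first key of the tree block it describes.
 */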
1991 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1992                                     struct extent_buffer *leaf,
1993                                     struct btrfs_extent_item *ei)
1994 {
1995         u64 flags = btrfs_extent_flags(leaf, ei);
1996         if (extent_op->update_flags) {
1997                 flags |= extent_op->flags_to_set;
1998                 btrfs_set_extent_flags(leaf, ei, flags);
1999         }
2000
2001         if (extent_op->update_key) {
2002                 struct btrfs_tree_block_info *bi;
2003                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2004                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2005                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2006         }
2007 }
2008
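/*
 * Run a delayed extent op on its own: find the extent item in the
 * extent tree and update its flags/key.
 */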
2009 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2010                                  struct btrfs_root *root,
2011                                  struct btrfs_delayed_ref_node *node,
2012                                  struct btrfs_delayed_extent_op *extent_op)
2013 {
2014         struct btrfs_key key;
2015         struct btrfs_path *path;
2016         struct btrfs_extent_item *ei;
2017         struct extent_buffer *leaf;
2018         u32 item_size;
2019         int ret;
2020         int err = 0;
2021
2022         if (trans->aborted)
2023                 return 0;
2024
2025         path = btrfs_alloc_path();
2026         if (!path)
2027                 return -ENOMEM;
2028
2029         key.objectid = node->bytenr;
2030         key.type = BTRFS_EXTENT_ITEM_KEY;
2031         key.offset = node->num_bytes;
2032
2033         path->reada = 1;
2034         path->leave_spinning = 1;
2035         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2036                                 path, 0, 1);
2037         if (ret < 0) {
2038                 err = ret;
2039                 goto out;
2040         }
2041         if (ret > 0) {
2042                 err = -EIO;
2043                 goto out;
2044         }
2045
2046         leaf = path->nodes[0];
2047         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2048 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2049         if (item_size < sizeof(*ei)) {
2050                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2051                                              path, (u64)-1, 0);
2052                 if (ret < 0) {
2053                         err = ret;
2054                         goto out;
2055                 }
2056                 leaf = path->nodes[0];
2057                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2058         }
2059 #endif
2060         BUG_ON(item_size < sizeof(*ei));
2061         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2062         __run_delayed_extent_op(extent_op, leaf, ei);
2063
2064         btrfs_mark_buffer_dirty(leaf);
2065 out:
2066         btrfs_free_path(path);
2067         return err;
2068 }
2069
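/*
 * Process one delayed ref node for a tree block: insert the reserved
 * block's extent item, add a reference to it, or free one.
 */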
2070 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2071                                 struct btrfs_root *root,
2072                                 struct btrfs_delayed_ref_node *node,
2073                                 struct btrfs_delayed_extent_op *extent_op,
2074                                 int insert_reserved)
2075 {
2076         int ret = 0;
2077         struct btrfs_delayed_tree_ref *ref;
2078         struct btrfs_key ins;
2079         u64 parent = 0;
2080         u64 ref_root = 0;
2081
2082         ins.objectid = node->bytenr;
2083         ins.offset = node->num_bytes;
2084         ins.type = BTRFS_EXTENT_ITEM_KEY;
2085
2086         ref = btrfs_delayed_node_to_tree_ref(node);
2087         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2088                 parent = ref->parent;
2089         else
2090                 ref_root = ref->root;
2091
2092         BUG_ON(node->ref_mod != 1);
2093         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2094                 BUG_ON(!extent_op || !extent_op->update_flags ||
2095                        !extent_op->update_key);
2096                 ret = alloc_reserved_tree_block(trans, root,
2097                                                 parent, ref_root,
2098                                                 extent_op->flags_to_set,
2099                                                 &extent_op->key,
2100                                                 ref->level, &ins);
2101         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2102                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2103                                              node->num_bytes, parent, ref_root,
2104                                              ref->level, 0, 1, extent_op);
2105         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2106                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2107                                           node->num_bytes, parent, ref_root,
2108                                           ref->level, 0, 1, extent_op);
2109         } else {
2110                 BUG();
2111         }
2112         return ret;
2113 }
2114
2115 /* helper function to actually process a single delayed ref entry */
2116 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2117                                struct btrfs_root *root,
2118                                struct btrfs_delayed_ref_node *node,
2119                                struct btrfs_delayed_extent_op *extent_op,
2120                                int insert_reserved)
2121 {
2122         int ret = 0;
2123
2124         if (trans->aborted)
2125                 return 0;
2126
2127         if (btrfs_delayed_ref_is_head(node)) {
2128                 struct btrfs_delayed_ref_head *head;
2129                 /*
2130                  * we've hit the end of the chain and we were supposed
2131                  * to insert this extent into the tree.  But it got
2132                  * deleted before we ever needed to insert it, so all
2133                  * we have to do is clean up the accounting.
2134                  */
2135                 BUG_ON(extent_op);
2136                 head = btrfs_delayed_node_to_head(node);
2137                 if (insert_reserved) {
2138                         btrfs_pin_extent(root, node->bytenr,
2139                                          node->num_bytes, 1);
2140                         if (head->is_data) {
2141                                 ret = btrfs_del_csums(trans, root,
2142                                                       node->bytenr,
2143                                                       node->num_bytes);
2144                         }
2145                 }
2146                 mutex_unlock(&head->mutex);
2147                 return ret;
2148         }
2149
2150         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2151             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2152                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2153                                            insert_reserved);
2154         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2155                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2156                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2157                                            insert_reserved);
2158         else
2159                 BUG();
2160         return ret;
2161 }
2162
2163 static noinline struct btrfs_delayed_ref_node *
2164 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2165 {
2166         struct rb_node *node;
2167         struct btrfs_delayed_ref_node *ref;
2168         int action = BTRFS_ADD_DELAYED_REF;
2169 again:
2170         /*
2171          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2172          * This prevents the ref count from going down to zero while
2173          * there are still pending delayed refs.
2174          */
2175         node = rb_prev(&head->node.rb_node);
2176         while (1) {
2177                 if (!node)
2178                         break;
2179                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2180                                 rb_node);
2181                 if (ref->bytenr != head->node.bytenr)
2182                         break;
2183                 if (ref->action == action)
2184                         return ref;
2185                 node = rb_prev(node);
2186         }
2187         if (action == BTRFS_ADD_DELAYED_REF) {
2188                 action = BTRFS_DROP_DELAYED_REF;
2189                 goto again;
2190         }
2191         return NULL;
2192 }
2193
2194 /*
2195  * Returns 0 on success or if called with an already aborted transaction.
2196  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2197  */
2198 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2199                                        struct btrfs_root *root,
2200                                        struct list_head *cluster)
2201 {
2202         struct btrfs_delayed_ref_root *delayed_refs;
2203         struct btrfs_delayed_ref_node *ref;
2204         struct btrfs_delayed_ref_head *locked_ref = NULL;
2205         struct btrfs_delayed_extent_op *extent_op;
2206         struct btrfs_fs_info *fs_info = root->fs_info;
2207         int ret;
2208         int count = 0;
2209         int must_insert_reserved = 0;
2210
2211         delayed_refs = &trans->transaction->delayed_refs;
2212         while (1) {
2213                 if (!locked_ref) {
2214                         /* pick a new head ref from the cluster list */
2215                         if (list_empty(cluster))
2216                                 break;
2217
2218                         locked_ref = list_entry(cluster->next,
2219                                      struct btrfs_delayed_ref_head, cluster);
2220
2221                         /* grab the lock that says we are going to process
2222                          * all the refs for this head */
2223                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2224
2225                         /*
2226                          * we may have dropped the spin lock to get the head
2227                          * mutex lock, and that might have given someone else
2228                          * time to free the head.  If that's true, it has been
2229                          * removed from our list and we can move on.
2230                          */
2231                         if (ret == -EAGAIN) {
2232                                 locked_ref = NULL;
2233                                 count++;
2234                                 continue;
2235                         }
2236                 }
2237
2238                 /*
2239                  * We need to try and merge add/drops of the same ref since we
2240                  * can run into issues with relocate dropping the implicit ref
2241                  * and then it being added back again before the drop can
2242                  * finish.  If we merged anything we need to re-loop so we can
2243                  * get a good ref.
2244                  */
2245                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2246                                          locked_ref);
2247
2248                 /*
2249                  * locked_ref is the head node, so we have to go one
2250                  * node back for any delayed ref updates
2251                  */
2252                 ref = select_delayed_ref(locked_ref);
2253
2254                 if (ref && ref->seq &&
2255                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2256                         /*
2257                          * there are still refs with lower seq numbers in the
2258                          * process of being added. Don't run this ref yet.
2259                          */
2260                         list_del_init(&locked_ref->cluster);
2261                         mutex_unlock(&locked_ref->mutex);
2262                         locked_ref = NULL;
2263                         delayed_refs->num_heads_ready++;
2264                         spin_unlock(&delayed_refs->lock);
2265                         cond_resched();
2266                         spin_lock(&delayed_refs->lock);
2267                         continue;
2268                 }
2269
2270                 /*
2271                  * record the must insert reserved flag before we
2272                  * drop the spin lock.
2273                  */
2274                 must_insert_reserved = locked_ref->must_insert_reserved;
2275                 locked_ref->must_insert_reserved = 0;
2276
2277                 extent_op = locked_ref->extent_op;
2278                 locked_ref->extent_op = NULL;
2279
2280                 if (!ref) {
2281                         /* All delayed refs have been processed; go ahead
2282                          * and send the head node to run_one_delayed_ref,
2283                          * so that any accounting fixes can happen.
2284                          */
2285                         ref = &locked_ref->node;
2286
2287                         if (extent_op && must_insert_reserved) {
2288                                 kfree(extent_op);
2289                                 extent_op = NULL;
2290                         }
2291
2292                         if (extent_op) {
2293                                 spin_unlock(&delayed_refs->lock);
2294
2295                                 ret = run_delayed_extent_op(trans, root,
2296                                                             ref, extent_op);
2297                                 kfree(extent_op);
2298
2299                                 if (ret) {
2300                                         list_del_init(&locked_ref->cluster);
2301                                         mutex_unlock(&locked_ref->mutex);
2302
2303                                         printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2304                                         spin_lock(&delayed_refs->lock);
2305                                         return ret;
2306                                 }
2307
2308                                 goto next;
2309                         }
2310
2311                         list_del_init(&locked_ref->cluster);
2312                         locked_ref = NULL;
2313                 }
2314
2315                 ref->in_tree = 0;
2316                 rb_erase(&ref->rb_node, &delayed_refs->root);
2317                 delayed_refs->num_entries--;
2318                 if (locked_ref) {
2319                         /*
2320                          * when we play the delayed ref, also correct the
2321                          * ref_mod on head
2322                          */
2323                         switch (ref->action) {
2324                         case BTRFS_ADD_DELAYED_REF:
2325                         case BTRFS_ADD_DELAYED_EXTENT:
2326                                 locked_ref->node.ref_mod -= ref->ref_mod;
2327                                 break;
2328                         case BTRFS_DROP_DELAYED_REF:
2329                                 locked_ref->node.ref_mod += ref->ref_mod;
2330                                 break;
2331                         default:
2332                                 WARN_ON(1);
2333                         }
2334                 }
2335                 spin_unlock(&delayed_refs->lock);
2336
2337                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2338                                           must_insert_reserved);
2339
2340                 btrfs_put_delayed_ref(ref);
2341                 kfree(extent_op);
2342                 count++;
2343
2344                 if (ret) {
2345                         if (locked_ref) {
2346                                 list_del_init(&locked_ref->cluster);
2347                                 mutex_unlock(&locked_ref->mutex);
2348                         }
2349                         printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2350                         spin_lock(&delayed_refs->lock);
2351                         return ret;
2352                 }
2353
2354 next:
2355                 cond_resched();
2356                 spin_lock(&delayed_refs->lock);
2357         }
2358         return count;
2359 }
2360
2361 #ifdef SCRAMBLE_DELAYED_REFS
2362 /*
2363  * Normally delayed refs get processed in ascending bytenr order. This
2364  * correlates in most cases to the order added. To expose dependencies on this
2365  * order, we start to process the tree in the middle instead of the beginning
2366  */
2367 static u64 find_middle(struct rb_root *root)
2368 {
2369         struct rb_node *n = root->rb_node;
2370         struct btrfs_delayed_ref_node *entry;
2371         int alt = 1;
2372         u64 middle;
2373         u64 first = 0, last = 0;
2374
2375         n = rb_first(root);
2376         if (n) {
2377                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2378                 first = entry->bytenr;
2379         }
2380         n = rb_last(root);
2381         if (n) {
2382                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2383                 last = entry->bytenr;
2384         }
2385         n = root->rb_node;
2386
2387         while (n) {
2388                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2389                 WARN_ON(!entry->in_tree);
2390
2391                 middle = entry->bytenr;
2392
2393                 if (alt)
2394                         n = n->rb_left;
2395                 else
2396                         n = n->rb_right;
2397
2398                 alt = 1 - alt;
2399         }
2400         return middle;
2401 }
2402 #endif
2403
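/*
 * Run all qgroup ref updates queued on this transaction, then drop the
 * transaction's tree mod seq element.
 */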
2404 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2405                                          struct btrfs_fs_info *fs_info)
2406 {
2407         struct qgroup_update *qgroup_update;
2408         int ret = 0;
2409
2410         if (list_empty(&trans->qgroup_ref_list) !=
2411             !trans->delayed_ref_elem.seq) {
2412                 /* list without seq or seq without list */
2413                 printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2414                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2415                         trans->delayed_ref_elem.seq);
2416                 BUG();
2417         }
2418
2419         if (!trans->delayed_ref_elem.seq)
2420                 return 0;
2421
2422         while (!list_empty(&trans->qgroup_ref_list)) {
2423                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2424                                                  struct qgroup_update, list);
2425                 list_del(&qgroup_update->list);
2426                 if (!ret)
2427                         ret = btrfs_qgroup_account_ref(
2428                                         trans, fs_info, qgroup_update->node,
2429                                         qgroup_update->extent_op);
2430                 kfree(qgroup_update);
2431         }
2432
2433         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2434
2435         return ret;
2436 }
2437
2438 /*
2439  * this starts processing the delayed reference count updates and
2440  * extent insertions we have queued up so far.  count can be
2441  * 0, which means to process everything in the tree at the start
2442  * of the run (but not newly added entries), or it can be some target
2443  * number you'd like to process.
2444  *
2445  * Returns 0 on success or if called with an aborted transaction.
2446  * Returns <0 on error and aborts the transaction.
2447  */
2448 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2449                            struct btrfs_root *root, unsigned long count)
2450 {
2451         struct rb_node *node;
2452         struct btrfs_delayed_ref_root *delayed_refs;
2453         struct btrfs_delayed_ref_node *ref;
2454         struct list_head cluster;
2455         int ret;
2456         u64 delayed_start;
2457         int run_all = count == (unsigned long)-1;
2458         int run_most = 0;
2459         int loops;
2460
2461         /* We'll clean this up in btrfs_cleanup_transaction */
2462         if (trans->aborted)
2463                 return 0;
2464
2465         if (root == root->fs_info->extent_root)
2466                 root = root->fs_info->tree_root;
2467
2468         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2469
2470         delayed_refs = &trans->transaction->delayed_refs;
2471         INIT_LIST_HEAD(&cluster);
2472 again:
2473         loops = 0;
2474         spin_lock(&delayed_refs->lock);
2475
2476 #ifdef SCRAMBLE_DELAYED_REFS
2477         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2478 #endif
2479
2480         if (count == 0) {
2481                 count = delayed_refs->num_entries * 2;
2482                 run_most = 1;
2483         }
2484         while (1) {
2485                 if (!(run_all || run_most) &&
2486                     delayed_refs->num_heads_ready < 64)
2487                         break;
2488
2489                 /*
2490                  * go find something we can process in the rbtree.  We start at
2491                  * the beginning of the tree, and then build a cluster
2492                  * of refs to process starting at the first one we are able to
2493                  * lock
2494                  */
2495                 delayed_start = delayed_refs->run_delayed_start;
2496                 ret = btrfs_find_ref_cluster(trans, &cluster,
2497                                              delayed_refs->run_delayed_start);
2498                 if (ret)
2499                         break;
2500
2501                 ret = run_clustered_refs(trans, root, &cluster);
2502                 if (ret < 0) {
2503                         spin_unlock(&delayed_refs->lock);
2504                         btrfs_abort_transaction(trans, root, ret);
2505                         return ret;
2506                 }
2507
2508                 count -= min_t(unsigned long, ret, count);
2509
2510                 if (count == 0)
2511                         break;
2512
2513                 if (delayed_start >= delayed_refs->run_delayed_start) {
2514                         if (loops == 0) {
2515                                 /*
2516                                  * btrfs_find_ref_cluster looped.  Let's do one
2517                                  * more cycle.  If we don't run any delayed refs
2518                                  * during that cycle (because they are all
2519                                  * blocked), bail out.
2520                                  */
2521                                 loops = 1;
2522                         } else {
2523                                 /*
2524                                  * no runnable refs left, stop trying
2525                                  */
2526                                 BUG_ON(run_all);
2527                                 break;
2528                         }
2529                 }
2530                 if (ret) {
2531                         /* refs were run, let's reset staleness detection */
2532                         loops = 0;
2533                 }
2534         }
2535
2536         if (run_all) {
2537                 if (!list_empty(&trans->new_bgs)) {
2538                         spin_unlock(&delayed_refs->lock);
2539                         btrfs_create_pending_block_groups(trans, root);
2540                         spin_lock(&delayed_refs->lock);
2541                 }
2542
2543                 node = rb_first(&delayed_refs->root);
2544                 if (!node)
2545                         goto out;
2546                 count = (unsigned long)-1;
2547
2548                 while (node) {
2549                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2550                                        rb_node);
2551                         if (btrfs_delayed_ref_is_head(ref)) {
2552                                 struct btrfs_delayed_ref_head *head;
2553
2554                                 head = btrfs_delayed_node_to_head(ref);
2555                                 atomic_inc(&ref->refs);
2556
2557                                 spin_unlock(&delayed_refs->lock);
2558                                 /*
2559                                  * Mutex was contended, block until it's
2560                                  * released and try again
2561                                  */
2562                                 mutex_lock(&head->mutex);
2563                                 mutex_unlock(&head->mutex);
2564
2565                                 btrfs_put_delayed_ref(ref);
2566                                 cond_resched();
2567                                 goto again;
2568                         }
2569                         node = rb_next(node);
2570                 }
2571                 spin_unlock(&delayed_refs->lock);
2572                 schedule_timeout(1);
2573                 goto again;
2574         }
2575 out:
2576         spin_unlock(&delayed_refs->lock);
2577         assert_qgroups_uptodate(trans);
2578         return 0;
2579 }
2580
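/*
 * Queue a delayed extent op that ORs @flags into the on-disk flags of
 * the extent item for this extent.
 */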
2581 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2582                                 struct btrfs_root *root,
2583                                 u64 bytenr, u64 num_bytes, u64 flags,
2584                                 int is_data)
2585 {
2586         struct btrfs_delayed_extent_op *extent_op;
2587         int ret;
2588
2589         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2590         if (!extent_op)
2591                 return -ENOMEM;
2592
2593         extent_op->flags_to_set = flags;
2594         extent_op->update_flags = 1;
2595         extent_op->update_key = 0;
2596         extent_op->is_data = is_data ? 1 : 0;
2597
2598         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2599                                           num_bytes, extent_op);
2600         if (ret)
2601                 kfree(extent_op);
2602         return ret;
2603 }
2604
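/*
 * Check the delayed refs for @bytenr.  Returns 0 if the only pending
 * ref is a data ref owned by @objectid/@offset in this root, 1 if any
 * other ref is pending, -ENOENT if there is no delayed ref head, and
 * -EAGAIN if the head's mutex was contended.
 */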
2605 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2606                                       struct btrfs_root *root,
2607                                       struct btrfs_path *path,
2608                                       u64 objectid, u64 offset, u64 bytenr)
2609 {
2610         struct btrfs_delayed_ref_head *head;
2611         struct btrfs_delayed_ref_node *ref;
2612         struct btrfs_delayed_data_ref *data_ref;
2613         struct btrfs_delayed_ref_root *delayed_refs;
2614         struct rb_node *node;
2615         int ret = 0;
2616
2617         ret = -ENOENT;
2618         delayed_refs = &trans->transaction->delayed_refs;
2619         spin_lock(&delayed_refs->lock);
2620         head = btrfs_find_delayed_ref_head(trans, bytenr);
2621         if (!head)
2622                 goto out;
2623
2624         if (!mutex_trylock(&head->mutex)) {
2625                 atomic_inc(&head->node.refs);
2626                 spin_unlock(&delayed_refs->lock);
2627
2628                 btrfs_release_path(path);
2629
2630                 /*
2631                  * Mutex was contended, block until it's released and let
2632                  * caller try again
2633                  */
2634                 mutex_lock(&head->mutex);
2635                 mutex_unlock(&head->mutex);
2636                 btrfs_put_delayed_ref(&head->node);
2637                 return -EAGAIN;
2638         }
2639
2640         node = rb_prev(&head->node.rb_node);
2641         if (!node)
2642                 goto out_unlock;
2643
2644         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2645
2646         if (ref->bytenr != bytenr)
2647                 goto out_unlock;
2648
2649         ret = 1;
2650         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2651                 goto out_unlock;
2652
2653         data_ref = btrfs_delayed_node_to_data_ref(ref);
2654
2655         node = rb_prev(node);
2656         if (node) {
2657                 int seq = ref->seq;
2658
2659                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2660                 if (ref->bytenr == bytenr && ref->seq == seq)
2661                         goto out_unlock;
2662         }
2663
2664         if (data_ref->root != root->root_key.objectid ||
2665             data_ref->objectid != objectid || data_ref->offset != offset)
2666                 goto out_unlock;
2667
2668         ret = 0;
2669 out_unlock:
2670         mutex_unlock(&head->mutex);
2671 out:
2672         spin_unlock(&delayed_refs->lock);
2673         return ret;
2674 }
2675
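/*
 * Check the committed extent tree.  Returns 0 only when the extent at
 * @bytenr carries a single inline data ref, owned by @objectid/@offset
 * in this root, and is newer than the root's last snapshot; otherwise
 * the extent may be shared.
 */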
2676 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2677                                         struct btrfs_root *root,
2678                                         struct btrfs_path *path,
2679                                         u64 objectid, u64 offset, u64 bytenr)
2680 {
2681         struct btrfs_root *extent_root = root->fs_info->extent_root;
2682         struct extent_buffer *leaf;
2683         struct btrfs_extent_data_ref *ref;
2684         struct btrfs_extent_inline_ref *iref;
2685         struct btrfs_extent_item *ei;
2686         struct btrfs_key key;
2687         u32 item_size;
2688         int ret;
2689
2690         key.objectid = bytenr;
2691         key.offset = (u64)-1;
2692         key.type = BTRFS_EXTENT_ITEM_KEY;
2693
2694         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2695         if (ret < 0)
2696                 goto out;
2697         BUG_ON(ret == 0); /* Corruption */
2698
2699         ret = -ENOENT;
2700         if (path->slots[0] == 0)
2701                 goto out;
2702
2703         path->slots[0]--;
2704         leaf = path->nodes[0];
2705         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2706
2707         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2708                 goto out;
2709
2710         ret = 1;
2711         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2712 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2713         if (item_size < sizeof(*ei)) {
2714                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2715                 goto out;
2716         }
2717 #endif
2718         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2719
2720         if (item_size != sizeof(*ei) +
2721             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2722                 goto out;
2723
2724         if (btrfs_extent_generation(leaf, ei) <=
2725             btrfs_root_last_snapshot(&root->root_item))
2726                 goto out;
2727
2728         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2729         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2730             BTRFS_EXTENT_DATA_REF_KEY)
2731                 goto out;
2732
2733         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2734         if (btrfs_extent_refs(leaf, ei) !=
2735             btrfs_extent_data_ref_count(leaf, ref) ||
2736             btrfs_extent_data_ref_root(leaf, ref) !=
2737             root->root_key.objectid ||
2738             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2739             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2740                 goto out;
2741
2742         ret = 0;
2743 out:
2744         return ret;
2745 }
2746
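/*
 * Returns 0 when both the committed extent tree and the delayed refs agree
 * that nobody other than the given root/objectid/offset references this
 * data extent, and nonzero (1 or a negative errno) when a cross reference
 * exists or we couldn't prove that it doesn't.
 */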
2747 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2748                           struct btrfs_root *root,
2749                           u64 objectid, u64 offset, u64 bytenr)
2750 {
2751         struct btrfs_path *path;
2752         int ret;
2753         int ret2;
2754
2755         path = btrfs_alloc_path();
2756         if (!path)
2757                 return -ENOMEM;
2758
2759         do {
2760                 ret = check_committed_ref(trans, root, path, objectid,
2761                                           offset, bytenr);
2762                 if (ret && ret != -ENOENT)
2763                         goto out;
2764
2765                 ret2 = check_delayed_ref(trans, root, path, objectid,
2766                                          offset, bytenr);
2767         } while (ret2 == -EAGAIN);
2768
2769         if (ret2 && ret2 != -ENOENT) {
2770                 ret = ret2;
2771                 goto out;
2772         }
2773
2774         if (ret != -ENOENT || ret2 != -ENOENT)
2775                 ret = 0;
2776 out:
2777         btrfs_free_path(path);
2778         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2779                 WARN_ON(ret > 0);
2780         return ret;
2781 }
2782
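/*
 * Walk all the pointers in @buf and add (@inc != 0) or drop one reference
 * for each extent it points to: disk bytenrs of regular file extents when
 * @buf is a leaf, child block pointers when it is a node.  When
 * @full_backref is set the refs are recorded against @buf's own bytenr
 * rather than the owning root.
 */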
2783 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2784                            struct btrfs_root *root,
2785                            struct extent_buffer *buf,
2786                            int full_backref, int inc, int for_cow)
2787 {
2788         u64 bytenr;
2789         u64 num_bytes;
2790         u64 parent;
2791         u64 ref_root;
2792         u32 nritems;
2793         struct btrfs_key key;
2794         struct btrfs_file_extent_item *fi;
2795         int i;
2796         int level;
2797         int ret = 0;
2798         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2799                             u64, u64, u64, u64, u64, u64, int);
2800
2801         ref_root = btrfs_header_owner(buf);
2802         nritems = btrfs_header_nritems(buf);
2803         level = btrfs_header_level(buf);
2804
2805         if (!root->ref_cows && level == 0)
2806                 return 0;
2807
2808         if (inc)
2809                 process_func = btrfs_inc_extent_ref;
2810         else
2811                 process_func = btrfs_free_extent;
2812
2813         if (full_backref)
2814                 parent = buf->start;
2815         else
2816                 parent = 0;
2817
2818         for (i = 0; i < nritems; i++) {
2819                 if (level == 0) {
2820                         btrfs_item_key_to_cpu(buf, &key, i);
2821                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2822                                 continue;
2823                         fi = btrfs_item_ptr(buf, i,
2824                                             struct btrfs_file_extent_item);
2825                         if (btrfs_file_extent_type(buf, fi) ==
2826                             BTRFS_FILE_EXTENT_INLINE)
2827                                 continue;
2828                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2829                         if (bytenr == 0)
2830                                 continue;
2831
2832                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2833                         key.offset -= btrfs_file_extent_offset(buf, fi);
2834                         ret = process_func(trans, root, bytenr, num_bytes,
2835                                            parent, ref_root, key.objectid,
2836                                            key.offset, for_cow);
2837                         if (ret)
2838                                 goto fail;
2839                 } else {
2840                         bytenr = btrfs_node_blockptr(buf, i);
2841                         num_bytes = btrfs_level_size(root, level - 1);
2842                         ret = process_func(trans, root, bytenr, num_bytes,
2843                                            parent, ref_root, level - 1, 0,
2844                                            for_cow);
2845                         if (ret)
2846                                 goto fail;
2847                 }
2848         }
2849         return 0;
2850 fail:
2851         return ret;
2852 }
2853
2854 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2855                   struct extent_buffer *buf, int full_backref, int for_cow)
2856 {
2857         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2858 }
2859
2860 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2861                   struct extent_buffer *buf, int full_backref, int for_cow)
2862 {
2863         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2864 }
2865
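/*
 * Write the in-memory block group item for @cache back into its slot in
 * the extent tree.  Any failure aborts the transaction.
 */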
2866 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2867                                  struct btrfs_root *root,
2868                                  struct btrfs_path *path,
2869                                  struct btrfs_block_group_cache *cache)
2870 {
2871         int ret;
2872         struct btrfs_root *extent_root = root->fs_info->extent_root;
2873         unsigned long bi;
2874         struct extent_buffer *leaf;
2875
2876         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2877         if (ret < 0)
2878                 goto fail;
2879         BUG_ON(ret); /* Corruption */
2880
2881         leaf = path->nodes[0];
2882         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2883         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2884         btrfs_mark_buffer_dirty(leaf);
2885         btrfs_release_path(path);
2886 fail:
2887         if (ret) {
2888                 btrfs_abort_transaction(trans, root, ret);
2889                 return ret;
2890         }
2891         return 0;
2892
2893 }
2894
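/*
 * Drop our reference on @cache and return the next block group in the
 * block group cache rbtree with a reference held, or NULL if @cache was
 * the last one.
 */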
2895 static struct btrfs_block_group_cache *
2896 next_block_group(struct btrfs_root *root,
2897                  struct btrfs_block_group_cache *cache)
2898 {
2899         struct rb_node *node;
2900         spin_lock(&root->fs_info->block_group_cache_lock);
2901         node = rb_next(&cache->cache_node);
2902         btrfs_put_block_group(cache);
2903         if (node) {
2904                 cache = rb_entry(node, struct btrfs_block_group_cache,
2905                                  cache_node);
2906                 btrfs_get_block_group(cache);
2907         } else
2908                 cache = NULL;
2909         spin_unlock(&root->fs_info->block_group_cache_lock);
2910         return cache;
2911 }
2912
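/*
 * Prepare the free space cache inode for @block_group in this transaction:
 * create the inode if necessary, truncate any stale contents and
 * preallocate room for the cache file (16 pages per 256MB of block group)
 * so the cache can be written at commit time without allocating.
 */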
2913 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2914                             struct btrfs_trans_handle *trans,
2915                             struct btrfs_path *path)
2916 {
2917         struct btrfs_root *root = block_group->fs_info->tree_root;
2918         struct inode *inode = NULL;
2919         u64 alloc_hint = 0;
2920         int dcs = BTRFS_DC_ERROR;
2921         int num_pages = 0;
2922         int retries = 0;
2923         int ret = 0;
2924
2925         /*
2926          * If this block group is smaller than 100 megs, don't bother
2927          * caching the block group.
2928          */
2929         if (block_group->key.offset < (100 * 1024 * 1024)) {
2930                 spin_lock(&block_group->lock);
2931                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2932                 spin_unlock(&block_group->lock);
2933                 return 0;
2934         }
2935
2936 again:
2937         inode = lookup_free_space_inode(root, block_group, path);
2938         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2939                 ret = PTR_ERR(inode);
2940                 btrfs_release_path(path);
2941                 goto out;
2942         }
2943
2944         if (IS_ERR(inode)) {
2945                 BUG_ON(retries);
2946                 retries++;
2947
2948                 if (block_group->ro)
2949                         goto out_free;
2950
2951                 ret = create_free_space_inode(root, trans, block_group, path);
2952                 if (ret)
2953                         goto out_free;
2954                 goto again;
2955         }
2956
2957         /* We've already set up the cache in this transaction, go ahead and exit */
2958         if (block_group->cache_generation == trans->transid &&
2959             i_size_read(inode)) {
2960                 dcs = BTRFS_DC_SETUP;
2961                 goto out_put;
2962         }
2963
2964         /*
2965          * We want to set the generation to 0, that way if anything goes wrong
2966          * from here on out we know not to trust this cache when we load up next
2967          * time.
2968          */
2969         BTRFS_I(inode)->generation = 0;
2970         ret = btrfs_update_inode(trans, root, inode);
2971         WARN_ON(ret);
2972
2973         if (i_size_read(inode) > 0) {
2974                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2975                                                       inode);
2976                 if (ret)
2977                         goto out_put;
2978         }
2979
2980         spin_lock(&block_group->lock);
2981         if (block_group->cached != BTRFS_CACHE_FINISHED ||
2982             !btrfs_test_opt(root, SPACE_CACHE)) {
2983                 /*
2984                  * Don't bother trying to write stuff out _if_
2985                  * a) we're not cached, or
2986                  * b) we're mounted with the nospace_cache option.
2987                  */
2988                 dcs = BTRFS_DC_WRITTEN;
2989                 spin_unlock(&block_group->lock);
2990                 goto out_put;
2991         }
2992         spin_unlock(&block_group->lock);
2993
2994         /*
2995          * Try to preallocate enough space based on how big the block group is.
2996          * Keep in mind this has to include any pinned space which could end up
2997          * taking up quite a bit since it's not folded into the other space
2998          * cache.
2999          */
3000         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3001         if (!num_pages)
3002                 num_pages = 1;
3003
3004         num_pages *= 16;
3005         num_pages *= PAGE_CACHE_SIZE;
3006
3007         ret = btrfs_check_data_free_space(inode, num_pages);
3008         if (ret)
3009                 goto out_put;
3010
3011         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3012                                               num_pages, num_pages,
3013                                               &alloc_hint);
3014         if (!ret)
3015                 dcs = BTRFS_DC_SETUP;
3016         btrfs_free_reserved_data_space(inode, num_pages);
3017
3018 out_put:
3019         iput(inode);
3020 out_free:
3021         btrfs_release_path(path);
3022 out:
3023         spin_lock(&block_group->lock);
3024         if (!ret && dcs == BTRFS_DC_SETUP)
3025                 block_group->cache_generation = trans->transid;
3026         block_group->disk_cache_state = dcs;
3027         spin_unlock(&block_group->lock);
3028
3029         return ret;
3030 }
3031
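/*
 * Three passes: set up cache inodes for all block groups marked
 * BTRFS_DC_CLEAR, write out the dirty block group items, then write out
 * the free space caches left in BTRFS_DC_NEED_WRITE.
 */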
3032 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3033                                    struct btrfs_root *root)
3034 {
3035         struct btrfs_block_group_cache *cache;
3036         int err = 0;
3037         struct btrfs_path *path;
3038         u64 last = 0;
3039
3040         path = btrfs_alloc_path();
3041         if (!path)
3042                 return -ENOMEM;
3043
3044 again:
3045         while (1) {
3046                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3047                 while (cache) {
3048                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3049                                 break;
3050                         cache = next_block_group(root, cache);
3051                 }
3052                 if (!cache) {
3053                         if (last == 0)
3054                                 break;
3055                         last = 0;
3056                         continue;
3057                 }
3058                 err = cache_save_setup(cache, trans, path);
3059                 last = cache->key.objectid + cache->key.offset;
3060                 btrfs_put_block_group(cache);
3061         }
3062
3063         while (1) {
3064                 if (last == 0) {
3065                         err = btrfs_run_delayed_refs(trans, root,
3066                                                      (unsigned long)-1);
3067                         if (err) /* File system offline */
3068                                 goto out;
3069                 }
3070
3071                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3072                 while (cache) {
3073                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3074                                 btrfs_put_block_group(cache);
3075                                 goto again;
3076                         }
3077
3078                         if (cache->dirty)
3079                                 break;
3080                         cache = next_block_group(root, cache);
3081                 }
3082                 if (!cache) {
3083                         if (last == 0)
3084                                 break;
3085                         last = 0;
3086                         continue;
3087                 }
3088
3089                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3090                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3091                 cache->dirty = 0;
3092                 last = cache->key.objectid + cache->key.offset;
3093
3094                 err = write_one_cache_group(trans, root, path, cache);
3095                 if (err) /* File system offline */
3096                         goto out;
3097
3098                 btrfs_put_block_group(cache);
3099         }
3100
3101         while (1) {
3102                 /*
3103                  * This shouldn't be needed since we're just marking our
3104                  * preallocated extent as written, but just in case it
3105                  * can't hurt.
3106                  */
3107                 if (last == 0) {
3108                         err = btrfs_run_delayed_refs(trans, root,
3109                                                      (unsigned long)-1);
3110                         if (err) /* File system offline */
3111                                 goto out;
3112                 }
3113
3114                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3115                 while (cache) {
3116                         /*
3117                          * Really this shouldn't happen, but it could if we
3118                          * couldn't write the entire preallocated extent and
3119                          * splitting the extent resulted in a new block.
3120                          */
3121                         if (cache->dirty) {
3122                                 btrfs_put_block_group(cache);
3123                                 goto again;
3124                         }
3125                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3126                                 break;
3127                         cache = next_block_group(root, cache);
3128                 }
3129                 if (!cache) {
3130                         if (last == 0)
3131                                 break;
3132                         last = 0;
3133                         continue;
3134                 }
3135
3136                 err = btrfs_write_out_cache(root, trans, cache, path);
3137
3138                 /*
3139                  * If we didn't have an error then the cache state is still
3140                  * NEED_WRITE, so we can set it to WRITTEN.
3141                  */
3142                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3143                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3144                 last = cache->key.objectid + cache->key.offset;
3145                 btrfs_put_block_group(cache);
3146         }
3147 out:
3148
3149         btrfs_free_path(path);
3150         return err;
3151 }
3152
3153 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3154 {
3155         struct btrfs_block_group_cache *block_group;
3156         int readonly = 0;
3157
3158         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3159         if (!block_group || block_group->ro)
3160                 readonly = 1;
3161         if (block_group)
3162                 btrfs_put_block_group(block_group);
3163         return readonly;
3164 }
3165
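/*
 * Account @total_bytes/@bytes_used to the space info matching @flags,
 * allocating and registering a new space info if none exists yet.  For
 * DUP/RAID1/RAID10 the disk_* totals are doubled since every byte is
 * stored twice.
 */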
3166 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3167                              u64 total_bytes, u64 bytes_used,
3168                              struct btrfs_space_info **space_info)
3169 {
3170         struct btrfs_space_info *found;
3171         int i;
3172         int factor;
3173
3174         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3175                      BTRFS_BLOCK_GROUP_RAID10))
3176                 factor = 2;
3177         else
3178                 factor = 1;
3179
3180         found = __find_space_info(info, flags);
3181         if (found) {
3182                 spin_lock(&found->lock);
3183                 found->total_bytes += total_bytes;
3184                 found->disk_total += total_bytes * factor;
3185                 found->bytes_used += bytes_used;
3186                 found->disk_used += bytes_used * factor;
3187                 found->full = 0;
3188                 spin_unlock(&found->lock);
3189                 *space_info = found;
3190                 return 0;
3191         }
3192         found = kzalloc(sizeof(*found), GFP_NOFS);
3193         if (!found)
3194                 return -ENOMEM;
3195
3196         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3197                 INIT_LIST_HEAD(&found->block_groups[i]);
3198         init_rwsem(&found->groups_sem);
3199         spin_lock_init(&found->lock);
3200         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3201         found->total_bytes = total_bytes;
3202         found->disk_total = total_bytes * factor;
3203         found->bytes_used = bytes_used;
3204         found->disk_used = bytes_used * factor;
3205         found->bytes_pinned = 0;
3206         found->bytes_reserved = 0;
3207         found->bytes_readonly = 0;
3208         found->bytes_may_use = 0;
3209         found->full = 0;
3210         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3211         found->chunk_alloc = 0;
3212         found->flush = 0;
3213         init_waitqueue_head(&found->wait);
3214         *space_info = found;
3215         list_add_rcu(&found->list, &info->space_info);
3216         if (flags & BTRFS_BLOCK_GROUP_DATA)
3217                 info->data_sinfo = found;
3218         return 0;
3219 }
3220
3221 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3222 {
3223         u64 extra_flags = chunk_to_extended(flags) &
3224                                 BTRFS_EXTENDED_PROFILE_MASK;
3225
3226         if (flags & BTRFS_BLOCK_GROUP_DATA)
3227                 fs_info->avail_data_alloc_bits |= extra_flags;
3228         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3229                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3230         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3231                 fs_info->avail_system_alloc_bits |= extra_flags;
3232 }
3233
3234 /*
3235  * returns target flags in extended format or 0 if restripe for this
3236  * chunk_type is not in progress
3237  *
3238  * should be called with either volume_mutex or balance_lock held
3239  */
3240 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3241 {
3242         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3243         u64 target = 0;
3244
3245         if (!bctl)
3246                 return 0;
3247
3248         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3249             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3250                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3251         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3252                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3253                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3254         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3255                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3256                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3257         }
3258
3259         return target;
3260 }
3261
3262 /*
3263  * @flags: available profiles in extended format (see ctree.h)
3264  *
3265  * Returns reduced profile in chunk format.  If profile changing is in
3266  * progress (either running or paused) picks the target profile (if it's
3267  * already available), otherwise falls back to plain reducing.
3268  */
3269 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3270 {
3271         /*
3272          * we add in the count of missing devices because we want
3273          * to make sure that any RAID levels on a degraded FS
3274          * continue to be honored.
3275          */
3276         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3277                 root->fs_info->fs_devices->missing_devices;
3278         u64 target;
3279
3280         /*
3281          * see if restripe for this chunk_type is in progress, if so
3282          * try to reduce to the target profile
3283          */
3284         spin_lock(&root->fs_info->balance_lock);
3285         target = get_restripe_target(root->fs_info, flags);
3286         if (target) {
3287                 /* pick target profile only if it's already available */
3288                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3289                         spin_unlock(&root->fs_info->balance_lock);
3290                         return extended_to_chunk(target);
3291                 }
3292         }
3293         spin_unlock(&root->fs_info->balance_lock);
3294
3295         if (num_devices == 1)
3296                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3297         if (num_devices < 4)
3298                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3299
3300         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3301             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3302                       BTRFS_BLOCK_GROUP_RAID10))) {
3303                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3304         }
3305
3306         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3307             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3308                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3309         }
3310
3311         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3312             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3313              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3314              (flags & BTRFS_BLOCK_GROUP_DUP))) {
3315                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3316         }
3317
3318         return extended_to_chunk(flags);
3319 }
3320
3321 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3322 {
3323         if (flags & BTRFS_BLOCK_GROUP_DATA)
3324                 flags |= root->fs_info->avail_data_alloc_bits;
3325         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3326                 flags |= root->fs_info->avail_system_alloc_bits;
3327         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3328                 flags |= root->fs_info->avail_metadata_alloc_bits;
3329
3330         return btrfs_reduce_alloc_profile(root, flags);
3331 }
3332
3333 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3334 {
3335         u64 flags;
3336
3337         if (data)
3338                 flags = BTRFS_BLOCK_GROUP_DATA;
3339         else if (root == root->fs_info->chunk_root)
3340                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3341         else
3342                 flags = BTRFS_BLOCK_GROUP_METADATA;
3343
3344         return get_alloc_profile(root, flags);
3345 }
3346
3347 /*
3348  * This will check the space that the inode allocates from to make sure we have
3349  * enough space for bytes.
3350  */
3351 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3352 {
3353         struct btrfs_space_info *data_sinfo;
3354         struct btrfs_root *root = BTRFS_I(inode)->root;
3355         struct btrfs_fs_info *fs_info = root->fs_info;
3356         u64 used;
3357         int ret = 0, committed = 0, alloc_chunk = 1;
3358
3359         /* make sure bytes are sectorsize aligned */
3360         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3361
3362         if (root == root->fs_info->tree_root ||
3363             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3364                 alloc_chunk = 0;
3365                 committed = 1;
3366         }
3367
3368         data_sinfo = fs_info->data_sinfo;
3369         if (!data_sinfo)
3370                 goto alloc;
3371
3372 again:
3373         /* make sure we have enough space to handle the data first */
3374         spin_lock(&data_sinfo->lock);
3375         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3376                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3377                 data_sinfo->bytes_may_use;
3378
3379         if (used + bytes > data_sinfo->total_bytes) {
3380                 struct btrfs_trans_handle *trans;
3381
3382                 /*
3383                  * if we don't have enough free bytes in this space then we need
3384                  * to alloc a new chunk.
3385                  */
3386                 if (!data_sinfo->full && alloc_chunk) {
3387                         u64 alloc_target;
3388
3389                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3390                         spin_unlock(&data_sinfo->lock);
3391 alloc:
3392                         alloc_target = btrfs_get_alloc_profile(root, 1);
3393                         trans = btrfs_join_transaction(root);
3394                         if (IS_ERR(trans))
3395                                 return PTR_ERR(trans);
3396
3397                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3398                                              alloc_target,
3399                                              CHUNK_ALLOC_NO_FORCE);
3400                         btrfs_end_transaction(trans, root);
3401                         if (ret < 0) {
3402                                 if (ret != -ENOSPC)
3403                                         return ret;
3404                                 else
3405                                         goto commit_trans;
3406                         }
3407
3408                         if (!data_sinfo)
3409                                 data_sinfo = fs_info->data_sinfo;
3410
3411                         goto again;
3412                 }
3413
3414                 /*
3415                  * If we have less pinned bytes than we want to allocate then
3416                  * don't bother committing the transaction, it won't help us.
3417                  */
3418                 if (data_sinfo->bytes_pinned < bytes)
3419                         committed = 1;
3420                 spin_unlock(&data_sinfo->lock);
3421
3422                 /* commit the current transaction and try again */
3423 commit_trans:
3424                 if (!committed &&
3425                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3426                         committed = 1;
3427                         trans = btrfs_join_transaction(root);
3428                         if (IS_ERR(trans))
3429                                 return PTR_ERR(trans);
3430                         ret = btrfs_commit_transaction(trans, root);
3431                         if (ret)
3432                                 return ret;
3433                         goto again;
3434                 }
3435
3436                 return -ENOSPC;
3437         }
3438         data_sinfo->bytes_may_use += bytes;
3439         trace_btrfs_space_reservation(root->fs_info, "space_info",
3440                                       data_sinfo->flags, bytes, 1);
3441         spin_unlock(&data_sinfo->lock);
3442
3443         return 0;
3444 }
3445
3446 /*
3447  * Called if we need to clear a data reservation for this inode.
3448  */
3449 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3450 {
3451         struct btrfs_root *root = BTRFS_I(inode)->root;
3452         struct btrfs_space_info *data_sinfo;
3453
3454         /* make sure bytes are sectorsize aligned */
3455         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3456
3457         data_sinfo = root->fs_info->data_sinfo;
3458         spin_lock(&data_sinfo->lock);
3459         data_sinfo->bytes_may_use -= bytes;
3460         trace_btrfs_space_reservation(root->fs_info, "space_info",
3461                                       data_sinfo->flags, bytes, 0);
3462         spin_unlock(&data_sinfo->lock);
3463 }
3464
3465 static void force_metadata_allocation(struct btrfs_fs_info *info)
3466 {
3467         struct list_head *head = &info->space_info;
3468         struct btrfs_space_info *found;
3469
3470         rcu_read_lock();
3471         list_for_each_entry_rcu(found, head, list) {
3472                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3473                         found->force_alloc = CHUNK_ALLOC_FORCE;
3474         }
3475         rcu_read_unlock();
3476 }
3477
3478 static int should_alloc_chunk(struct btrfs_root *root,
3479                               struct btrfs_space_info *sinfo, int force)
3480 {
3481         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3482         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3483         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3484         u64 thresh;
3485
3486         if (force == CHUNK_ALLOC_FORCE)
3487                 return 1;
3488
3489         /*
3490          * We need to take into account the global rsv because for all intents
3491          * and purposes it's used space.  Don't worry about locking the
3492          * global_rsv, it doesn't change except when the transaction commits.
3493          */
3494         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3495                 num_allocated += global_rsv->size;
3496
3497         /*
3498          * in limited mode, we want to have some free space up to
3499          * about 1% of the FS size.
3500          */
3501         if (force == CHUNK_ALLOC_LIMITED) {
3502                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3503                 thresh = max_t(u64, 64 * 1024 * 1024,
3504                                div_factor_fine(thresh, 1));
3505
3506                 if (num_bytes - num_allocated < thresh)
3507                         return 1;
3508         }
3509
3510         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3511                 return 0;
3512         return 1;
3513 }
3514
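/*
 * Worst-case metadata we need in the SYSTEM space info to allocate a chunk
 * of @type: one device item update per device the chunk may stripe across,
 * plus one item for the chunk tree itself.
 */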
3515 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3516 {
3517         u64 num_dev;
3518
3519         if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3520             type & BTRFS_BLOCK_GROUP_RAID0)
3521                 num_dev = root->fs_info->fs_devices->rw_devices;
3522         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3523                 num_dev = 2;
3524         else
3525                 num_dev = 1;    /* DUP or single */
3526
3527         /* metadata for updating devices and chunk tree */
3528         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3529 }
3530
3531 static void check_system_chunk(struct btrfs_trans_handle *trans,
3532                                struct btrfs_root *root, u64 type)
3533 {
3534         struct btrfs_space_info *info;
3535         u64 left;
3536         u64 thresh;
3537
3538         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3539         spin_lock(&info->lock);
3540         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3541                 info->bytes_reserved - info->bytes_readonly;
3542         spin_unlock(&info->lock);
3543
3544         thresh = get_system_chunk_thresh(root, type);
3545         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3546                 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3547                        left, thresh, type);
3548                 dump_space_info(info, 0, 0);
3549         }
3550
3551         if (left < thresh) {
3552                 u64 flags;
3553
3554                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3555                 btrfs_alloc_chunk(trans, root, flags);
3556         }
3557 }
3558
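/*
 * Allocate a new chunk for @flags if should_alloc_chunk() agrees, making
 * sure only one allocation per space info runs at a time.  Returns 1 if a
 * chunk was allocated, 0 if none was needed, and a negative errno
 * (including -ENOSPC, which also marks the space info full) on failure.
 */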
3559 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3560                           struct btrfs_root *extent_root, u64 flags, int force)
3561 {
3562         struct btrfs_space_info *space_info;
3563         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3564         int wait_for_alloc = 0;
3565         int ret = 0;
3566
3567         space_info = __find_space_info(extent_root->fs_info, flags);
3568         if (!space_info) {
3569                 ret = update_space_info(extent_root->fs_info, flags,
3570                                         0, 0, &space_info);
3571                 BUG_ON(ret); /* -ENOMEM */
3572         }
3573         BUG_ON(!space_info); /* Logic error */
3574
3575 again:
3576         spin_lock(&space_info->lock);
3577         if (force < space_info->force_alloc)
3578                 force = space_info->force_alloc;
3579         if (space_info->full) {
3580                 spin_unlock(&space_info->lock);
3581                 return 0;
3582         }
3583
3584         if (!should_alloc_chunk(extent_root, space_info, force)) {
3585                 spin_unlock(&space_info->lock);
3586                 return 0;
3587         } else if (space_info->chunk_alloc) {
3588                 wait_for_alloc = 1;
3589         } else {
3590                 space_info->chunk_alloc = 1;
3591         }
3592
3593         spin_unlock(&space_info->lock);
3594
3595         mutex_lock(&fs_info->chunk_mutex);
3596
3597         /*
3598          * The chunk_mutex is held throughout the entirety of a chunk
3599          * allocation, so once we've acquired the chunk_mutex we know that the
3600          * other guy is done and we need to recheck and see if we should
3601          * allocate.
3602          */
3603         if (wait_for_alloc) {
3604                 mutex_unlock(&fs_info->chunk_mutex);
3605                 wait_for_alloc = 0;
3606                 goto again;
3607         }
3608
3609         /*
3610          * If we have mixed data/metadata chunks we want to make sure we keep
3611          * allocating mixed chunks instead of individual chunks.
3612          */
3613         if (btrfs_mixed_space_info(space_info))
3614                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3615
3616         /*
3617          * if we're doing a data chunk, go ahead and make sure that
3618          * we keep a reasonable number of metadata chunks allocated in the
3619          * FS as well.
3620          */
3621         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3622                 fs_info->data_chunk_allocations++;
3623                 if (!(fs_info->data_chunk_allocations %
3624                       fs_info->metadata_ratio))
3625                         force_metadata_allocation(fs_info);
3626         }
3627
3628         /*
3629          * Check if we have enough space in SYSTEM chunk because we may need
3630          * to update devices.
3631          */
3632         check_system_chunk(trans, extent_root, flags);
3633
3634         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3635         if (ret < 0 && ret != -ENOSPC)
3636                 goto out;
3637
3638         spin_lock(&space_info->lock);
3639         if (ret)
3640                 space_info->full = 1;
3641         else
3642                 ret = 1;
3643
3644         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3645         space_info->chunk_alloc = 0;
3646         spin_unlock(&space_info->lock);
3647 out:
3648         mutex_unlock(&fs_info->chunk_mutex);
3649         return ret;
3650 }
3651
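/*
 * Decide if a reservation of @bytes may overcommit @space_info based on
 * the chunk space still unallocated on the devices: up to 1/8 of it when
 * a full flush is possible, up to half when it isn't, halved again for
 * mirrored profiles (DUP/RAID1/RAID10).
 */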
3652 static int can_overcommit(struct btrfs_root *root,
3653                           struct btrfs_space_info *space_info, u64 bytes,
3654                           enum btrfs_reserve_flush_enum flush)
3655 {
3656         u64 profile = btrfs_get_alloc_profile(root, 0);
3657         u64 avail;
3658         u64 used;
3659
3660         used = space_info->bytes_used + space_info->bytes_reserved +
3661                 space_info->bytes_pinned + space_info->bytes_readonly +
3662                 space_info->bytes_may_use;
3663
3664         spin_lock(&root->fs_info->free_chunk_lock);
3665         avail = root->fs_info->free_chunk_space;
3666         spin_unlock(&root->fs_info->free_chunk_lock);
3667
3668         /*
3669          * If we have dup, raid1 or raid10 then only half of the free
3670          * space is actually usable.
3671          */
3672         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3673                        BTRFS_BLOCK_GROUP_RAID1 |
3674                        BTRFS_BLOCK_GROUP_RAID10))
3675                 avail >>= 1;
3676
3677         /*
3678          * If we aren't allowed to flush all things, let us overcommit up
3679          * to half of the space.  If we can flush, don't let us overcommit
3680          * more than 1/8 of the space.
3681          */
3682         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3683                 avail >>= 3;
3684         else
3685                 avail >>= 1;
3686
3687         if (used + bytes < space_info->total_bytes + avail)
3688                 return 1;
3689         return 0;
3690 }
3691
3692 /*
3693  * shrink metadata reservation for delalloc
3694  */
3695 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3696                             bool wait_ordered)
3697 {
3698         struct btrfs_block_rsv *block_rsv;
3699         struct btrfs_space_info *space_info;
3700         struct btrfs_trans_handle *trans;
3701         u64 delalloc_bytes;
3702         u64 max_reclaim;
3703         long time_left;
3704         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3705         int loops = 0;
3706         enum btrfs_reserve_flush_enum flush;
3707
3708         trans = (struct btrfs_trans_handle *)current->journal_info;
3709         block_rsv = &root->fs_info->delalloc_block_rsv;
3710         space_info = block_rsv->space_info;
3711
3712         smp_mb();
3713         delalloc_bytes = root->fs_info->delalloc_bytes;
3714         if (delalloc_bytes == 0) {
3715                 if (trans)
3716                         return;
3717                 btrfs_wait_ordered_extents(root, 0);
3718                 return;
3719         }
3720
3721         while (delalloc_bytes && loops < 3) {
3722                 max_reclaim = min(delalloc_bytes, to_reclaim);
3723                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3724                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3725                                                WB_REASON_FS_FREE_SPACE);
3726
3727                 /*
3728                  * We need to wait for the async pages to actually start before
3729                  * we do anything.
3730                  */
3731                 wait_event(root->fs_info->async_submit_wait,
3732                            !atomic_read(&root->fs_info->async_delalloc_pages));
3733
3734                 if (!trans)
3735                         flush = BTRFS_RESERVE_FLUSH_ALL;
3736                 else
3737                         flush = BTRFS_RESERVE_NO_FLUSH;
3738                 spin_lock(&space_info->lock);
3739                 if (can_overcommit(root, space_info, orig, flush)) {
3740                         spin_unlock(&space_info->lock);
3741                         break;
3742                 }
3743                 spin_unlock(&space_info->lock);
3744
3745                 loops++;
3746                 if (wait_ordered && !trans) {
3747                         btrfs_wait_ordered_extents(root, 0);
3748                 } else {
3749                         time_left = schedule_timeout_killable(1);
3750                         if (time_left)
3751                                 break;
3752                 }
3753                 smp_mb();
3754                 delalloc_bytes = root->fs_info->delalloc_bytes;
3755         }
3756 }
3757
3758 /**
3759  * may_commit_transaction - possibly commit the transaction if it's ok to
3760  * @root - the root we're allocating for
3761  * @bytes - the number of bytes we want to reserve
3762  * @force - force the commit
3763  *
3764  * This will check to make sure that committing the transaction will actually
3765  * get us somewhere and then commit the transaction if it does.  Otherwise it
3766  * will return -ENOSPC.
3767  */
3768 static int may_commit_transaction(struct btrfs_root *root,
3769                                   struct btrfs_space_info *space_info,
3770                                   u64 bytes, int force)
3771 {
3772         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3773         struct btrfs_trans_handle *trans;
3774
3775         trans = (struct btrfs_trans_handle *)current->journal_info;
3776         if (trans)
3777                 return -EAGAIN;
3778
3779         if (force)
3780                 goto commit;
3781
3782         /* See if there is enough pinned space to make this reservation */
3783         spin_lock(&space_info->lock);
3784         if (space_info->bytes_pinned >= bytes) {
3785                 spin_unlock(&space_info->lock);
3786                 goto commit;
3787         }
3788         spin_unlock(&space_info->lock);
3789
3790         /*
3791          * See if there is some space in the delayed insertion reservation for
3792          * this reservation.
3793          */
3794         if (space_info != delayed_rsv->space_info)
3795                 return -ENOSPC;
3796
3797         spin_lock(&space_info->lock);
3798         spin_lock(&delayed_rsv->lock);
3799         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3800                 spin_unlock(&delayed_rsv->lock);
3801                 spin_unlock(&space_info->lock);
3802                 return -ENOSPC;
3803         }
3804         spin_unlock(&delayed_rsv->lock);
3805         spin_unlock(&space_info->lock);
3806
3807 commit:
3808         trans = btrfs_join_transaction(root);
3809         if (IS_ERR(trans))
3810                 return -ENOSPC;
3811
3812         return btrfs_commit_transaction(trans, root);
3813 }
3814
3815 enum flush_state {
3816         FLUSH_DELAYED_ITEMS_NR  =       1,
3817         FLUSH_DELAYED_ITEMS     =       2,
3818         FLUSH_DELALLOC          =       3,
3819         FLUSH_DELALLOC_WAIT     =       4,
3820         ALLOC_CHUNK             =       5,
3821         COMMIT_TRANS            =       6,
3822 };
3823
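/*
 * Run one state of the flush state machine above, using progressively
 * heavier ways of reclaiming metadata space, from flushing delayed items
 * and delalloc up to allocating a chunk or committing the transaction.
 */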
3824 static int flush_space(struct btrfs_root *root,
3825                        struct btrfs_space_info *space_info, u64 num_bytes,
3826                        u64 orig_bytes, int state)
3827 {
3828         struct btrfs_trans_handle *trans;
3829         int nr;
3830         int ret = 0;
3831
3832         switch (state) {
3833         case FLUSH_DELAYED_ITEMS_NR:
3834         case FLUSH_DELAYED_ITEMS:
3835                 if (state == FLUSH_DELAYED_ITEMS_NR) {
3836                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3837
3838                         nr = (int)div64_u64(num_bytes, bytes);
3839                         if (!nr)
3840                                 nr = 1;
3841                         nr *= 2;
3842                 } else {
3843                         nr = -1;
3844                 }
3845                 trans = btrfs_join_transaction(root);
3846                 if (IS_ERR(trans)) {
3847                         ret = PTR_ERR(trans);
3848                         break;
3849                 }
3850                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
3851                 btrfs_end_transaction(trans, root);
3852                 break;
3853         case FLUSH_DELALLOC:
3854         case FLUSH_DELALLOC_WAIT:
3855                 shrink_delalloc(root, num_bytes, orig_bytes,
3856                                 state == FLUSH_DELALLOC_WAIT);
3857                 break;
3858         case ALLOC_CHUNK:
3859                 trans = btrfs_join_transaction(root);
3860                 if (IS_ERR(trans)) {
3861                         ret = PTR_ERR(trans);
3862                         break;
3863                 }
3864                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3865                                      btrfs_get_alloc_profile(root, 0),
3866                                      CHUNK_ALLOC_NO_FORCE);
3867                 btrfs_end_transaction(trans, root);
3868                 if (ret == -ENOSPC)
3869                         ret = 0;
3870                 break;
3871         case COMMIT_TRANS:
3872                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3873                 break;
3874         default:
3875                 ret = -ENOSPC;
3876                 break;
3877         }
3878
3879         return ret;
3880 }
3881 /**
3882  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3883  * @root - the root we're allocating for
3884  * @block_rsv - the block_rsv we're allocating for
3885  * @orig_bytes - the number of bytes we want
3886  * @flush - whether or not we can flush to make our reservation
3887  *
3888  * This will reserve orig_bytes number of bytes from the space info associated
3889  * with the block_rsv.  If there is not enough space it will make an attempt to
3890  * flush out space to make room.  It will do this by flushing delalloc if
3891  * possible or committing the transaction.  If flush is 0 then no attempts to
3892  * regain reservations will be made and this will fail if there is not enough
3893  * space already.
3894  */
3895 static int reserve_metadata_bytes(struct btrfs_root *root,
3896                                   struct btrfs_block_rsv *block_rsv,
3897                                   u64 orig_bytes,
3898                                   enum btrfs_reserve_flush_enum flush)
3899 {
3900         struct btrfs_space_info *space_info = block_rsv->space_info;
3901         u64 used;
3902         u64 num_bytes = orig_bytes;
3903         int flush_state = FLUSH_DELAYED_ITEMS_NR;
3904         int ret = 0;
3905         bool flushing = false;
3906
3907 again:
3908         ret = 0;
3909         spin_lock(&space_info->lock);
3910         /*
3911          * We only want to wait if somebody other than us is flushing and we
3912          * are actually allowed to flush all things.
3913          */
3914         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
3915                space_info->flush) {
3916                 spin_unlock(&space_info->lock);
3917                 /*
3918                  * If we have a trans handle we can't wait because the flusher
3919                  * may have to commit the transaction, which would mean we would
3920                  * deadlock since we are waiting for the flusher to finish, but
3921                  * hold the current transaction open.
3922                  */
3923                 if (current->journal_info)
3924                         return -EAGAIN;
3925                 ret = wait_event_killable(space_info->wait, !space_info->flush);
3926                 /* Must have been killed, return */
3927                 if (ret)
3928                         return -EINTR;
3929
3930                 spin_lock(&space_info->lock);
3931         }
3932
3933         ret = -ENOSPC;
3934         used = space_info->bytes_used + space_info->bytes_reserved +
3935                 space_info->bytes_pinned + space_info->bytes_readonly +
3936                 space_info->bytes_may_use;
3937
3938         /*
3939          * The idea here is that if we've not already over-reserved the
3940          * block group, we can go ahead and save our reservation first and
3941          * then start flushing if we need to.  Otherwise, if we've already
3942          * overcommitted, let's start flushing stuff first and then come
3943          * back and try to make our reservation.
3944          */
3945         if (used <= space_info->total_bytes) {
3946                 if (used + orig_bytes <= space_info->total_bytes) {
3947                         space_info->bytes_may_use += orig_bytes;
3948                         trace_btrfs_space_reservation(root->fs_info,
3949                                 "space_info", space_info->flags, orig_bytes, 1);
3950                         ret = 0;
3951                 } else {
3952                         /*
3953                          * Ok set num_bytes to orig_bytes since we aren't
3954                          * overcommitted, this way we only try and reclaim what
3955                          * we need.
3956                          */
3957                         num_bytes = orig_bytes;
3958                 }
3959         } else {
3960                 /*
3961                  * Ok we're over committed, set num_bytes to the overcommitted
3962                  * amount plus the amount of bytes that we need for this
3963                  * reservation.
3964                  */
3965                 num_bytes = used - space_info->total_bytes +
3966                         (orig_bytes * 2);
3967         }
3968
3969         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
3970                 space_info->bytes_may_use += orig_bytes;
3971                 trace_btrfs_space_reservation(root->fs_info, "space_info",
3972                                               space_info->flags, orig_bytes,
3973                                               1);
3974                 ret = 0;
3975         }
3976
3977         /*
3978          * Couldn't make our reservation, save our place so while we're trying
3979          * to reclaim space we can actually use it instead of somebody else
3980          * stealing it from us.
3981          *
3982          * We make the other tasks wait for the flush only when we can flush
3983          * all things.
3984          */
3985         if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) {
3986                 flushing = true;
3987                 space_info->flush = 1;
3988         }
3989
3990         spin_unlock(&space_info->lock);
3991
3992         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
3993                 goto out;
3994
3995         ret = flush_space(root, space_info, num_bytes, orig_bytes,
3996                           flush_state);
3997         flush_state++;
3998
3999         /*
4000          * If we are FLUSH_LIMIT, we can't flush delalloc because it could
4001          * deadlock, so skip the delalloc flush states.
4002          */
4003         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4004             (flush_state == FLUSH_DELALLOC ||
4005              flush_state == FLUSH_DELALLOC_WAIT))
4006                 flush_state = ALLOC_CHUNK;
4007
4008         if (!ret)
4009                 goto again;
4010         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4011                  flush_state < COMMIT_TRANS)
4012                 goto again;
4013         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4014                  flush_state <= COMMIT_TRANS)
4015                 goto again;
4016
4017 out:
4018         if (flushing) {
4019                 spin_lock(&space_info->lock);
4020                 space_info->flush = 0;
4021                 wake_up_all(&space_info->wait);
4022                 spin_unlock(&space_info->lock);
4023         }
4024         return ret;
4025 }
4026
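/*
 * Pick the block reservation an allocation should draw from: the
 * transaction's rsv for COW-able roots (and for csum insertion), otherwise
 * the root's own rsv, falling back to the global empty_block_rsv.
 */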
4027 static struct btrfs_block_rsv *get_block_rsv(
4028                                         const struct btrfs_trans_handle *trans,
4029                                         const struct btrfs_root *root)
4030 {
4031         struct btrfs_block_rsv *block_rsv = NULL;
4032
4033         if (root->ref_cows)
4034                 block_rsv = trans->block_rsv;
4035
4036         if (root == root->fs_info->csum_root && trans->adding_csums)
4037                 block_rsv = trans->block_rsv;
4038
4039         if (!block_rsv)
4040                 block_rsv = root->block_rsv;
4041
4042         if (!block_rsv)
4043                 block_rsv = &root->fs_info->empty_block_rsv;
4044
4045         return block_rsv;
4046 }
4047
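/* Take @num_bytes out of @block_rsv, or return -ENOSPC if it is short. */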
4048 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4049                                u64 num_bytes)
4050 {
4051         int ret = -ENOSPC;
4052         spin_lock(&block_rsv->lock);
4053         if (block_rsv->reserved >= num_bytes) {
4054                 block_rsv->reserved -= num_bytes;
4055                 if (block_rsv->reserved < block_rsv->size)
4056                         block_rsv->full = 0;
4057                 ret = 0;
4058         }
4059         spin_unlock(&block_rsv->lock);
4060         return ret;
4061 }
4062
4063 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4064                                 u64 num_bytes, int update_size)
4065 {
4066         spin_lock(&block_rsv->lock);
4067         block_rsv->reserved += num_bytes;
4068         if (update_size)
4069                 block_rsv->size += num_bytes;
4070         else if (block_rsv->reserved >= block_rsv->size)
4071                 block_rsv->full = 1;
4072         spin_unlock(&block_rsv->lock);
4073 }
4074
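/*
 * Shrink @block_rsv's size by @num_bytes ((u64)-1 releases everything) and
 * hand any excess reserved bytes to @dest, returning whatever @dest can't
 * absorb to the space info's bytes_may_use pool.
 */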
4075 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4076                                     struct btrfs_block_rsv *block_rsv,
4077                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4078 {
4079         struct btrfs_space_info *space_info = block_rsv->space_info;
4080
4081         spin_lock(&block_rsv->lock);
4082         if (num_bytes == (u64)-1)
4083                 num_bytes = block_rsv->size;
4084         block_rsv->size -= num_bytes;
4085         if (block_rsv->reserved >= block_rsv->size) {
4086                 num_bytes = block_rsv->reserved - block_rsv->size;
4087                 block_rsv->reserved = block_rsv->size;
4088                 block_rsv->full = 1;
4089         } else {
4090                 num_bytes = 0;
4091         }
4092         spin_unlock(&block_rsv->lock);
4093
4094         if (num_bytes > 0) {
4095                 if (dest) {
4096                         spin_lock(&dest->lock);
4097                         if (!dest->full) {
4098                                 u64 bytes_to_add;
4099
4100                                 bytes_to_add = dest->size - dest->reserved;
4101                                 bytes_to_add = min(num_bytes, bytes_to_add);
4102                                 dest->reserved += bytes_to_add;
4103                                 if (dest->reserved >= dest->size)
4104                                         dest->full = 1;
4105                                 num_bytes -= bytes_to_add;
4106                         }
4107                         spin_unlock(&dest->lock);
4108                 }
4109                 if (num_bytes) {
4110                         spin_lock(&space_info->lock);
4111                         space_info->bytes_may_use -= num_bytes;
4112                         trace_btrfs_space_reservation(fs_info, "space_info",
4113                                         space_info->flags, num_bytes, 0);
4114                         space_info->reservation_progress++;
4115                         spin_unlock(&space_info->lock);
4116                 }
4117         }
4118 }
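     /*
      * A worked example of the release path above, with illustrative
      * numbers only.  Suppose block_rsv->size is 8M, block_rsv->reserved
      * is 10M, and we release num_bytes = 2M:
      *
      *   size     = 8M - 2M = 6M
      *   excess   = reserved - size = 10M - 6M = 4M
      *   reserved = 6M, full = 1
      *
      * The 4M excess first tops up 'dest' (typically the global rsv)
      * until dest is full; whatever remains is subtracted from the
      * space_info's bytes_may_use, since it is no longer spoken for.
      */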
4119
4120 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4121                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4122 {
4123         int ret;
4124
4125         ret = block_rsv_use_bytes(src, num_bytes);
4126         if (ret)
4127                 return ret;
4128
4129         block_rsv_add_bytes(dst, num_bytes, 1);
4130         return 0;
4131 }
4132
4133 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4134 {
4135         memset(rsv, 0, sizeof(*rsv));
4136         spin_lock_init(&rsv->lock);
4137         rsv->type = type;
4138 }
4139
4140 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4141                                               unsigned short type)
4142 {
4143         struct btrfs_block_rsv *block_rsv;
4144         struct btrfs_fs_info *fs_info = root->fs_info;
4145
4146         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4147         if (!block_rsv)
4148                 return NULL;
4149
4150         btrfs_init_block_rsv(block_rsv, type);
4151         block_rsv->space_info = __find_space_info(fs_info,
4152                                                   BTRFS_BLOCK_GROUP_METADATA);
4153         return block_rsv;
4154 }
4155
4156 void btrfs_free_block_rsv(struct btrfs_root *root,
4157                           struct btrfs_block_rsv *rsv)
4158 {
4159         if (!rsv)
4160                 return;
4161         btrfs_block_rsv_release(root, rsv, (u64)-1);
4162         kfree(rsv);
4163 }
4164
4165 int btrfs_block_rsv_add(struct btrfs_root *root,
4166                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4167                         enum btrfs_reserve_flush_enum flush)
4168 {
4169         int ret;
4170
4171         if (num_bytes == 0)
4172                 return 0;
4173
4174         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4175         if (!ret) {
4176                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4177                 return 0;
4178         }
4179
4180         return ret;
4181 }
4182
4183 int btrfs_block_rsv_check(struct btrfs_root *root,
4184                           struct btrfs_block_rsv *block_rsv, int min_factor)
4185 {
4186         u64 num_bytes = 0;
4187         int ret = -ENOSPC;
4188
4189         if (!block_rsv)
4190                 return 0;
4191
4192         spin_lock(&block_rsv->lock);
4193         num_bytes = div_factor(block_rsv->size, min_factor);
4194         if (block_rsv->reserved >= num_bytes)
4195                 ret = 0;
4196         spin_unlock(&block_rsv->lock);
4197
4198         return ret;
4199 }
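     /*
      * Note that min_factor is in tenths: div_factor() (from "math.h")
      * computes num * factor / 10.  As an illustrative sketch, with
      * block_rsv->size = 10M and min_factor = 8 the check above passes
      * only if reserved >= 8M, i.e. the rsv is at least 80% funded;
      * otherwise it returns -ENOSPC.
      */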
4200
4201 int btrfs_block_rsv_refill(struct btrfs_root *root,
4202                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4203                            enum btrfs_reserve_flush_enum flush)
4204 {
4205         u64 num_bytes = 0;
4206         int ret = -ENOSPC;
4207
4208         if (!block_rsv)
4209                 return 0;
4210
4211         spin_lock(&block_rsv->lock);
4212         num_bytes = min_reserved;
4213         if (block_rsv->reserved >= num_bytes)
4214                 ret = 0;
4215         else
4216                 num_bytes -= block_rsv->reserved;
4217         spin_unlock(&block_rsv->lock);
4218
4219         if (!ret)
4220                 return 0;
4221
4222         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4223         if (!ret) {
4224                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4225                 return 0;
4226         }
4227
4228         return ret;
4229 }
4230
4231 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4232                             struct btrfs_block_rsv *dst_rsv,
4233                             u64 num_bytes)
4234 {
4235         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4236 }
4237
4238 void btrfs_block_rsv_release(struct btrfs_root *root,
4239                              struct btrfs_block_rsv *block_rsv,
4240                              u64 num_bytes)
4241 {
4242         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4243         if (global_rsv->full || global_rsv == block_rsv ||
4244             block_rsv->space_info != global_rsv->space_info)
4245                 global_rsv = NULL;
4246         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4247                                 num_bytes);
4248 }
4249
4250 /*
4251  * helper to calculate size of global block reservation.
4252  * the desired value is sum of space used by extent tree,
4253  * checksum tree and root tree
4254  */
4255 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4256 {
4257         struct btrfs_space_info *sinfo;
4258         u64 num_bytes;
4259         u64 meta_used;
4260         u64 data_used;
4261         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4262
4263         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4264         spin_lock(&sinfo->lock);
4265         data_used = sinfo->bytes_used;
4266         spin_unlock(&sinfo->lock);
4267
4268         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4269         spin_lock(&sinfo->lock);
4270         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4271                 data_used = 0;
4272         meta_used = sinfo->bytes_used;
4273         spin_unlock(&sinfo->lock);
4274
4275         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4276                     csum_size * 2;
4277         num_bytes += div64_u64(data_used + meta_used, 50);
4278
4279         if (num_bytes * 3 > meta_used)
4280                 num_bytes = div64_u64(meta_used, 3);
4281
4282         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4283 }
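     /*
      * A worked example of the sizing above, with illustrative numbers.
      * Assume 4K blocks, 4-byte crc32c csums, data_used = 100G and
      * meta_used = 1G:
      *
      *   csum space: (100G >> 12) * 4 * 2   ~= 200M
      *   2% slack:   (100G + 1G) / 50       ~= 2G
      *   total                              ~= 2.2G
      *
      * Since 2.2G * 3 > meta_used, the result is clamped to
      * meta_used / 3 ~= 341M and then rounded up to a multiple of
      * leafsize << 10.
      */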
4284
4285 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4286 {
4287         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4288         struct btrfs_space_info *sinfo = block_rsv->space_info;
4289         u64 num_bytes;
4290
4291         num_bytes = calc_global_metadata_size(fs_info);
4292
4293         spin_lock(&sinfo->lock);
4294         spin_lock(&block_rsv->lock);
4295
4296         block_rsv->size = num_bytes;
4297
4298         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4299                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4300                     sinfo->bytes_may_use;
4301
4302         if (sinfo->total_bytes > num_bytes) {
4303                 num_bytes = sinfo->total_bytes - num_bytes;
4304                 block_rsv->reserved += num_bytes;
4305                 sinfo->bytes_may_use += num_bytes;
4306                 trace_btrfs_space_reservation(fs_info, "space_info",
4307                                       sinfo->flags, num_bytes, 1);
4308         }
4309
4310         if (block_rsv->reserved >= block_rsv->size) {
4311                 num_bytes = block_rsv->reserved - block_rsv->size;
4312                 sinfo->bytes_may_use -= num_bytes;
4313                 trace_btrfs_space_reservation(fs_info, "space_info",
4314                                       sinfo->flags, num_bytes, 0);
4315                 sinfo->reservation_progress++;
4316                 block_rsv->reserved = block_rsv->size;
4317                 block_rsv->full = 1;
4318         }
4319
4320         spin_unlock(&block_rsv->lock);
4321         spin_unlock(&sinfo->lock);
4322 }
4323
4324 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4325 {
4326         struct btrfs_space_info *space_info;
4327
4328         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4329         fs_info->chunk_block_rsv.space_info = space_info;
4330
4331         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4332         fs_info->global_block_rsv.space_info = space_info;
4333         fs_info->delalloc_block_rsv.space_info = space_info;
4334         fs_info->trans_block_rsv.space_info = space_info;
4335         fs_info->empty_block_rsv.space_info = space_info;
4336         fs_info->delayed_block_rsv.space_info = space_info;
4337
4338         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4339         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4340         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4341         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4342         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4343
4344         update_global_block_rsv(fs_info);
4345 }
4346
4347 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4348 {
4349         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4350                                 (u64)-1);
4351         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4352         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4353         WARN_ON(fs_info->trans_block_rsv.size > 0);
4354         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4355         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4356         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4357         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4358         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4359 }
4360
4361 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4362                                   struct btrfs_root *root)
4363 {
4364         if (!trans->block_rsv)
4365                 return;
4366
4367         if (!trans->bytes_reserved)
4368                 return;
4369
4370         trace_btrfs_space_reservation(root->fs_info, "transaction",
4371                                       trans->transid, trans->bytes_reserved, 0);
4372         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4373         trans->bytes_reserved = 0;
4374 }
4375
4376 /* Can only return 0 or -ENOSPC */
4377 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4378                                   struct inode *inode)
4379 {
4380         struct btrfs_root *root = BTRFS_I(inode)->root;
4381         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4382         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4383
4384         /*
4385          * We need to hold space in order to delete our orphan item once we've
4386          * added it, so this takes the reservation now and releases it
4387          * later, when we are truly done with the orphan item.
4388          */
4389         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4390         trace_btrfs_space_reservation(root->fs_info, "orphan",
4391                                       btrfs_ino(inode), num_bytes, 1);
4392         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4393 }
4394
4395 void btrfs_orphan_release_metadata(struct inode *inode)
4396 {
4397         struct btrfs_root *root = BTRFS_I(inode)->root;
4398         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4399         trace_btrfs_space_reservation(root->fs_info, "orphan",
4400                                       btrfs_ino(inode), num_bytes, 0);
4401         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4402 }
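     /*
      * A hedged sketch of how the two orphan helpers above pair up in a
      * typical caller (illustrative only, not an exact call site):
      *
      *   ret = btrfs_orphan_reserve_metadata(trans, inode);
      *   if (ret)
      *           return ret;     (-ENOSPC is the only possible failure)
      *   ...insert the orphan item and do the real work...
      *   btrfs_orphan_release_metadata(inode);
      *
      * The reserve migrates one item's worth of metadata space into
      * root->orphan_block_rsv; the release hands it back.
      */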
4403
4404 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4405                                 struct btrfs_pending_snapshot *pending)
4406 {
4407         struct btrfs_root *root = pending->root;
4408         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4409         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4410         /*
4411          * two for root back/forward refs, two for directory entries,
4412          * one for the root of the snapshot and one for the parent inode.
4413          */
4414         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
4415         dst_rsv->space_info = src_rsv->space_info;
4416         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4417 }
4418
4419 /**
4420  * drop_outstanding_extent - drop an outstanding extent
4421  * @inode: the inode we're dropping the extent for
4422  *
4423  * This is called when we are freeing up an outstanding extent, either after
4424  * an error or after an extent is written.  This will return the number of
4425  * reserved extents that need to be freed.  This must be called with
4426  * BTRFS_I(inode)->lock held.
4427  */
4428 static unsigned drop_outstanding_extent(struct inode *inode)
4429 {
4430         unsigned drop_inode_space = 0;
4431         unsigned dropped_extents = 0;
4432
4433         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4434         BTRFS_I(inode)->outstanding_extents--;
4435
4436         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4437             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4438                                &BTRFS_I(inode)->runtime_flags))
4439                 drop_inode_space = 1;
4440
4441         /*
4442          * If we have at least as many outstanding extents as reserved
4443          * extents, we need to leave the reserved extents count alone.
4444          */
4445         if (BTRFS_I(inode)->outstanding_extents >=
4446             BTRFS_I(inode)->reserved_extents)
4447                 return drop_inode_space;
4448
4449         dropped_extents = BTRFS_I(inode)->reserved_extents -
4450                 BTRFS_I(inode)->outstanding_extents;
4451         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4452         return dropped_extents + drop_inode_space;
4453 }
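     /*
      * Illustrative arithmetic for the helper above.  Suppose
      * outstanding_extents = 3 and reserved_extents = 5 on entry.  After
      * the decrement, outstanding = 2 < reserved = 5, so:
      *
      *   dropped_extents  = 5 - 2 = 3
      *   reserved_extents = 2
      *   return value     = 3 (plus 1 more whenever the last outstanding
      *                         extent also clears the inode-update bit)
      */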
4454
4455 /**
4456  * calc_csum_metadata_size - return the amount of metadata space that must
4457  *      be reserved/freed for the given bytes.
4458  * @inode: the inode we're manipulating
4459  * @num_bytes: the number of bytes in question
4460  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4461  *
4462  * This adjusts the number of csum_bytes in the inode and then returns the
4463  * correct amount of metadata that must either be reserved or freed.  We
4464  * calculate how many checksums we can fit into one leaf and then divide the
4465  * number of bytes that will need to be checksummed by this value to figure out
4466  * how many checksums will be required.  If we are adding bytes then the number
4467  * may go up and we will return the number of additional bytes that must be
4468  * reserved.  If it is going down we will return the number of bytes that must
4469  * be freed.
4470  *
4471  * This must be called with BTRFS_I(inode)->lock held.
4472  */
4473 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4474                                    int reserve)
4475 {
4476         struct btrfs_root *root = BTRFS_I(inode)->root;
4477         u64 csum_size;
4478         int num_csums_per_leaf;
4479         int num_csums;
4480         int old_csums;
4481
4482         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4483             BTRFS_I(inode)->csum_bytes == 0)
4484                 return 0;
4485
4486         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4487         if (reserve)
4488                 BTRFS_I(inode)->csum_bytes += num_bytes;
4489         else
4490                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4491         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4492         num_csums_per_leaf = (int)div64_u64(csum_size,
4493                                             sizeof(struct btrfs_csum_item) +
4494                                             sizeof(struct btrfs_disk_key));
4495         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4496         num_csums = num_csums + num_csums_per_leaf - 1;
4497         num_csums = num_csums / num_csums_per_leaf;
4498
4499         old_csums = old_csums + num_csums_per_leaf - 1;
4500         old_csums = old_csums / num_csums_per_leaf;
4501
4502         /* No change, no need to reserve more */
4503         if (old_csums == num_csums)
4504                 return 0;
4505
4506         if (reserve)
4507                 return btrfs_calc_trans_metadata_size(root,
4508                                                       num_csums - old_csums);
4509
4510         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4511 }
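     /*
      * A hedged example of the ceiling math above, using a made-up
      * per-leaf capacity purely for illustration.  Suppose
      * num_csums_per_leaf came out to 100 and sectorsize is 4K:
      *
      *   csum_bytes = 1M   -> 256 csums -> ceil(256/100) = 3 leaves
      *   reserve 64K more  -> 272 csums -> still 3 leaves -> return 0
      *   reserve 256K more -> 320 csums -> 4 leaves -> reserve the
      *                        metadata cost of one more item
      */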
4512
4513 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4514 {
4515         struct btrfs_root *root = BTRFS_I(inode)->root;
4516         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4517         u64 to_reserve = 0;
4518         u64 csum_bytes;
4519         unsigned nr_extents = 0;
4520         int extra_reserve = 0;
4521         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4522         int ret;
4523
4524         /* Need to be holding the i_mutex here if we aren't free space cache */
4525         if (btrfs_is_free_space_inode(inode))
4526                 flush = BTRFS_RESERVE_NO_FLUSH;
4527
4528         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4529             btrfs_transaction_in_commit(root->fs_info))
4530                 schedule_timeout(1);
4531
4532         mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4533         num_bytes = ALIGN(num_bytes, root->sectorsize);
4534
4535         spin_lock(&BTRFS_I(inode)->lock);
4536         BTRFS_I(inode)->outstanding_extents++;
4537
4538         if (BTRFS_I(inode)->outstanding_extents >
4539             BTRFS_I(inode)->reserved_extents)
4540                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4541                         BTRFS_I(inode)->reserved_extents;
4542
4543         /*
4544          * Add an item to reserve for updating the inode when we complete the
4545          * delalloc io.
4546          */
4547         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4548                       &BTRFS_I(inode)->runtime_flags)) {
4549                 nr_extents++;
4550                 extra_reserve = 1;
4551         }
4552
4553         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4554         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4555         csum_bytes = BTRFS_I(inode)->csum_bytes;
4556         spin_unlock(&BTRFS_I(inode)->lock);
4557
4558         if (root->fs_info->quota_enabled) {
4559                 ret = btrfs_qgroup_reserve(root, num_bytes +
4560                                            nr_extents * root->leafsize);
4561                 if (ret) {
4562                         spin_lock(&BTRFS_I(inode)->lock);
4563                         calc_csum_metadata_size(inode, num_bytes, 0);
4564                         spin_unlock(&BTRFS_I(inode)->lock);
4565                         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4566                         return ret;
4567                 }
4568         }
4569
4570         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4571         if (ret) {
4572                 u64 to_free = 0;
4573                 unsigned dropped;
4574
4575                 spin_lock(&BTRFS_I(inode)->lock);
4576                 dropped = drop_outstanding_extent(inode);
4577                 /*
4578                  * If the inode's csum_bytes is the same as the original
4579                  * csum_bytes then we know we haven't raced with any free()ers
4580                  * so we can just reduce the inode's csum bytes and carry on.
4581                  * Otherwise we have to do the normal free thing to account for
4582                  * the case that the free side didn't free up its reserve
4583                  * because of this outstanding reservation.
4584                  */
4585                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4586                         calc_csum_metadata_size(inode, num_bytes, 0);
4587                 else
4588                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4589                 spin_unlock(&BTRFS_I(inode)->lock);
4590                 if (dropped)
4591                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4592
4593                 if (to_free) {
4594                         btrfs_block_rsv_release(root, block_rsv, to_free);
4595                         trace_btrfs_space_reservation(root->fs_info,
4596                                                       "delalloc",
4597                                                       btrfs_ino(inode),
4598                                                       to_free, 0);
4599                 }
4600                 if (root->fs_info->quota_enabled) {
4601                         btrfs_qgroup_free(root, num_bytes +
4602                                                 nr_extents * root->leafsize);
4603                 }
4604                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4605                 return ret;
4606         }
4607
4608         spin_lock(&BTRFS_I(inode)->lock);
4609         if (extra_reserve) {
4610                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4611                         &BTRFS_I(inode)->runtime_flags);
4612                 nr_extents--;
4613         }
4614         BTRFS_I(inode)->reserved_extents += nr_extents;
4615         spin_unlock(&BTRFS_I(inode)->lock);
4616         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4617
4618         if (to_reserve)
4619                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4620                                               btrfs_ino(inode), to_reserve, 1);
4621         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4622
4623         return 0;
4624 }
4625
4626 /**
4627  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4628  * @inode: the inode to release the reservation for
4629  * @num_bytes: the number of bytes we're releasing
4630  *
4631  * This will release the metadata reservation for an inode.  This can be called
4632  * once we complete IO for a given set of bytes to release their metadata
4633  * reservations.
4634  */
4635 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4636 {
4637         struct btrfs_root *root = BTRFS_I(inode)->root;
4638         u64 to_free = 0;
4639         unsigned dropped;
4640
4641         num_bytes = ALIGN(num_bytes, root->sectorsize);
4642         spin_lock(&BTRFS_I(inode)->lock);
4643         dropped = drop_outstanding_extent(inode);
4644
4645         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4646         spin_unlock(&BTRFS_I(inode)->lock);
4647         if (dropped > 0)
4648                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4649
4650         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4651                                       btrfs_ino(inode), to_free, 0);
4652         if (root->fs_info->quota_enabled) {
4653                 btrfs_qgroup_free(root, num_bytes +
4654                                         dropped * root->leafsize);
4655         }
4656
4657         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4658                                 to_free);
4659 }
4660
4661 /**
4662  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4663  * @inode: inode we're writing to
4664  * @num_bytes: the number of bytes we want to allocate
4665  *
4666  * This will do the following things
4667  *
4668  * o reserve space in the data space info for num_bytes
4669  * o reserve space in the metadata space info based on number of outstanding
4670  *   extents and how much csums will be needed
4671  * o add to the inode's ->delalloc_bytes
4672  * o add it to the fs_info's delalloc inodes list.
4673  *
4674  * This will return 0 for success and -ENOSPC if there is no space left.
4675  */
4676 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4677 {
4678         int ret;
4679
4680         ret = btrfs_check_data_free_space(inode, num_bytes);
4681         if (ret)
4682                 return ret;
4683
4684         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4685         if (ret) {
4686                 btrfs_free_reserved_data_space(inode, num_bytes);
4687                 return ret;
4688         }
4689
4690         return 0;
4691 }
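     /*
      * A hedged sketch of the usual pairing for the delalloc space
      * helpers (illustrative, not a real call site):
      *
      *   ret = btrfs_delalloc_reserve_space(inode, len);
      *   if (ret)
      *           return ret;     (no data or metadata space left)
      *   ...dirty the pages...
      *   if (something_failed)
      *           btrfs_delalloc_release_space(inode, len);
      *
      * On success the metadata half is released later, as IO completes,
      * via btrfs_delalloc_release_metadata().
      */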
4692
4693 /**
4694  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4695  * @inode: inode we're releasing space for
4696  * @num_bytes: the number of bytes we want to free up
4697  *
4698  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4699  * called in the case that we don't need the metadata AND data reservations
4700  * anymore, e.g. when there is an error or when we insert an inline extent.
4701  *
4702  * This function will release the metadata space that was not used and will
4703  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4704  * list if there are no delalloc bytes left.
4705  */
4706 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4707 {
4708         btrfs_delalloc_release_metadata(inode, num_bytes);
4709         btrfs_free_reserved_data_space(inode, num_bytes);
4710 }
4711
4712 static int update_block_group(struct btrfs_trans_handle *trans,
4713                               struct btrfs_root *root,
4714                               u64 bytenr, u64 num_bytes, int alloc)
4715 {
4716         struct btrfs_block_group_cache *cache = NULL;
4717         struct btrfs_fs_info *info = root->fs_info;
4718         u64 total = num_bytes;
4719         u64 old_val;
4720         u64 byte_in_group;
4721         int factor;
4722
4723         /* block accounting for super block */
4724         spin_lock(&info->delalloc_lock);
4725         old_val = btrfs_super_bytes_used(info->super_copy);
4726         if (alloc)
4727                 old_val += num_bytes;
4728         else
4729                 old_val -= num_bytes;
4730         btrfs_set_super_bytes_used(info->super_copy, old_val);
4731         spin_unlock(&info->delalloc_lock);
4732
4733         while (total) {
4734                 cache = btrfs_lookup_block_group(info, bytenr);
4735                 if (!cache)
4736                         return -ENOENT;
4737                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4738                                     BTRFS_BLOCK_GROUP_RAID1 |
4739                                     BTRFS_BLOCK_GROUP_RAID10))
4740                         factor = 2;
4741                 else
4742                         factor = 1;
4743                 /*
4744                  * If this block group has free space cache written out, we
4745                  * need to make sure to load it if we are removing space.  This
4746                  * is because we need the unpinning stage to actually add the
4747                  * space back to the block group, otherwise we will leak space.
4748                  */
4749                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4750                         cache_block_group(cache, trans, NULL, 1);
4751
4752                 byte_in_group = bytenr - cache->key.objectid;
4753                 WARN_ON(byte_in_group > cache->key.offset);
4754
4755                 spin_lock(&cache->space_info->lock);
4756                 spin_lock(&cache->lock);
4757
4758                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4759                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4760                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4761
4762                 cache->dirty = 1;
4763                 old_val = btrfs_block_group_used(&cache->item);
4764                 num_bytes = min(total, cache->key.offset - byte_in_group);
4765                 if (alloc) {
4766                         old_val += num_bytes;
4767                         btrfs_set_block_group_used(&cache->item, old_val);
4768                         cache->reserved -= num_bytes;
4769                         cache->space_info->bytes_reserved -= num_bytes;
4770                         cache->space_info->bytes_used += num_bytes;
4771                         cache->space_info->disk_used += num_bytes * factor;
4772                         spin_unlock(&cache->lock);
4773                         spin_unlock(&cache->space_info->lock);
4774                 } else {
4775                         old_val -= num_bytes;
4776                         btrfs_set_block_group_used(&cache->item, old_val);
4777                         cache->pinned += num_bytes;
4778                         cache->space_info->bytes_pinned += num_bytes;
4779                         cache->space_info->bytes_used -= num_bytes;
4780                         cache->space_info->disk_used -= num_bytes * factor;
4781                         spin_unlock(&cache->lock);
4782                         spin_unlock(&cache->space_info->lock);
4783
4784                         set_extent_dirty(info->pinned_extents,
4785                                          bytenr, bytenr + num_bytes - 1,
4786                                          GFP_NOFS | __GFP_NOFAIL);
4787                 }
4788                 btrfs_put_block_group(cache);
4789                 total -= num_bytes;
4790                 bytenr += num_bytes;
4791         }
4792         return 0;
4793 }
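     /*
      * The 'factor' above accounts for on-disk redundancy.  As an
      * illustrative example, allocating 1M in a RAID1 (or DUP/RAID10)
      * block group moves 1M from bytes_reserved to bytes_used but bumps
      * disk_used by 2M, since the data occupies two copies on disk.
      */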
4794
4795 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4796 {
4797         struct btrfs_block_group_cache *cache;
4798         u64 bytenr;
4799
4800         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4801         if (!cache)
4802                 return 0;
4803
4804         bytenr = cache->key.objectid;
4805         btrfs_put_block_group(cache);
4806
4807         return bytenr;
4808 }
4809
4810 static int pin_down_extent(struct btrfs_root *root,
4811                            struct btrfs_block_group_cache *cache,
4812                            u64 bytenr, u64 num_bytes, int reserved)
4813 {
4814         spin_lock(&cache->space_info->lock);
4815         spin_lock(&cache->lock);
4816         cache->pinned += num_bytes;
4817         cache->space_info->bytes_pinned += num_bytes;
4818         if (reserved) {
4819                 cache->reserved -= num_bytes;
4820                 cache->space_info->bytes_reserved -= num_bytes;
4821         }
4822         spin_unlock(&cache->lock);
4823         spin_unlock(&cache->space_info->lock);
4824
4825         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4826                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4827         return 0;
4828 }
4829
4830 /*
4831  * this function must be called within transaction
4832  */
4833 int btrfs_pin_extent(struct btrfs_root *root,
4834                      u64 bytenr, u64 num_bytes, int reserved)
4835 {
4836         struct btrfs_block_group_cache *cache;
4837
4838         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4839         BUG_ON(!cache); /* Logic error */
4840
4841         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4842
4843         btrfs_put_block_group(cache);
4844         return 0;
4845 }
4846
4847 /*
4848  * this function must be called within transaction
4849  */
4850 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4851                                     struct btrfs_root *root,
4852                                     u64 bytenr, u64 num_bytes)
4853 {
4854         struct btrfs_block_group_cache *cache;
4855
4856         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4857         BUG_ON(!cache); /* Logic error */
4858
4859         /*
4860          * pull in the free space cache (if any) so that our pin
4861          * removes the free space from the cache.  We have load_only set
4862          * to one because the slow code that reads in the free extents
4863          * already checks the pinned extents.
4864          */
4865         cache_block_group(cache, trans, root, 1);
4866
4867         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4868
4869         /* remove us from the free space cache (if we're there at all) */
4870         btrfs_remove_free_space(cache, bytenr, num_bytes);
4871         btrfs_put_block_group(cache);
4872         return 0;
4873 }
4874
4875 /**
4876  * btrfs_update_reserved_bytes - update the block_group and space info counters
4877  * @cache:      The cache we are manipulating
4878  * @num_bytes:  The number of bytes in question
4879  * @reserve:    One of the reservation enums
4880  *
4881  * This is called by the allocator when it reserves space, or by somebody who is
4882  * freeing space that was never actually used on disk.  For example if you
4883  * reserve some space for a new leaf in transaction A and before transaction A
4884  * commits you free that leaf, you call this with reserve set to 0 in order to
4885  * clear the reservation.
4886  *
4887  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4888  * ENOSPC accounting.  For data we handle the reservation through clearing the
4889  * delalloc bits in the io_tree.  We have to do this since we could end up
4890  * allocating less disk space for the amount of data we have reserved in the
4891  * case of compression.
4892  *
4893  * If this is a reservation and the block group has become read only, we
4894  * cannot make it and return -EAGAIN; otherwise this function always
4895  * succeeds.
4896  */
4897 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4898                                        u64 num_bytes, int reserve)
4899 {
4900         struct btrfs_space_info *space_info = cache->space_info;
4901         int ret = 0;
4902
4903         spin_lock(&space_info->lock);
4904         spin_lock(&cache->lock);
4905         if (reserve != RESERVE_FREE) {
4906                 if (cache->ro) {
4907                         ret = -EAGAIN;
4908                 } else {
4909                         cache->reserved += num_bytes;
4910                         space_info->bytes_reserved += num_bytes;
4911                         if (reserve == RESERVE_ALLOC) {
4912                                 trace_btrfs_space_reservation(cache->fs_info,
4913                                                 "space_info", space_info->flags,
4914                                                 num_bytes, 0);
4915                                 space_info->bytes_may_use -= num_bytes;
4916                         }
4917                 }
4918         } else {
4919                 if (cache->ro)
4920                         space_info->bytes_readonly += num_bytes;
4921                 cache->reserved -= num_bytes;
4922                 space_info->bytes_reserved -= num_bytes;
4923                 space_info->reservation_progress++;
4924         }
4925         spin_unlock(&cache->lock);
4926         spin_unlock(&space_info->lock);
4927         return ret;
4928 }
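     /*
      * A hedged call sequence for the leaf example in the comment above
      * (illustrative only):
      *
      *   btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC);
      *           (allocator takes the space; bytes_may_use shrinks)
      *   btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
      *           (leaf freed before it ever hit disk; undo the above)
      */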
4929
4930 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4931                                 struct btrfs_root *root)
4932 {
4933         struct btrfs_fs_info *fs_info = root->fs_info;
4934         struct btrfs_caching_control *next;
4935         struct btrfs_caching_control *caching_ctl;
4936         struct btrfs_block_group_cache *cache;
4937
4938         down_write(&fs_info->extent_commit_sem);
4939
4940         list_for_each_entry_safe(caching_ctl, next,
4941                                  &fs_info->caching_block_groups, list) {
4942                 cache = caching_ctl->block_group;
4943                 if (block_group_cache_done(cache)) {
4944                         cache->last_byte_to_unpin = (u64)-1;
4945                         list_del_init(&caching_ctl->list);
4946                         put_caching_control(caching_ctl);
4947                 } else {
4948                         cache->last_byte_to_unpin = caching_ctl->progress;
4949                 }
4950         }
4951
4952         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4953                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4954         else
4955                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4956
4957         up_write(&fs_info->extent_commit_sem);
4958
4959         update_global_block_rsv(fs_info);
4960 }
4961
4962 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4963 {
4964         struct btrfs_fs_info *fs_info = root->fs_info;
4965         struct btrfs_block_group_cache *cache = NULL;
4966         struct btrfs_space_info *space_info;
4967         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4968         u64 len;
4969         bool readonly;
4970
4971         while (start <= end) {
4972                 readonly = false;
4973                 if (!cache ||
4974                     start >= cache->key.objectid + cache->key.offset) {
4975                         if (cache)
4976                                 btrfs_put_block_group(cache);
4977                         cache = btrfs_lookup_block_group(fs_info, start);
4978                         BUG_ON(!cache); /* Logic error */
4979                 }
4980
4981                 len = cache->key.objectid + cache->key.offset - start;
4982                 len = min(len, end + 1 - start);
4983
4984                 if (start < cache->last_byte_to_unpin) {
4985                         len = min(len, cache->last_byte_to_unpin - start);
4986                         btrfs_add_free_space(cache, start, len);
4987                 }
4988
4989                 start += len;
4990                 space_info = cache->space_info;
4991
4992                 spin_lock(&space_info->lock);
4993                 spin_lock(&cache->lock);
4994                 cache->pinned -= len;
4995                 space_info->bytes_pinned -= len;
4996                 if (cache->ro) {
4997                         space_info->bytes_readonly += len;
4998                         readonly = true;
4999                 }
5000                 spin_unlock(&cache->lock);
5001                 if (!readonly && global_rsv->space_info == space_info) {
5002                         spin_lock(&global_rsv->lock);
5003                         if (!global_rsv->full) {
5004                                 len = min(len, global_rsv->size -
5005                                           global_rsv->reserved);
5006                                 global_rsv->reserved += len;
5007                                 space_info->bytes_may_use += len;
5008                                 if (global_rsv->reserved >= global_rsv->size)
5009                                         global_rsv->full = 1;
5010                         }
5011                         spin_unlock(&global_rsv->lock);
5012                 }
5013                 spin_unlock(&space_info->lock);
5014         }
5015
5016         if (cache)
5017                 btrfs_put_block_group(cache);
5018         return 0;
5019 }
5020
5021 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5022                                struct btrfs_root *root)
5023 {
5024         struct btrfs_fs_info *fs_info = root->fs_info;
5025         struct extent_io_tree *unpin;
5026         u64 start;
5027         u64 end;
5028         int ret;
5029
5030         if (trans->aborted)
5031                 return 0;
5032
5033         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5034                 unpin = &fs_info->freed_extents[1];
5035         else
5036                 unpin = &fs_info->freed_extents[0];
5037
5038         while (1) {
5039                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5040                                             EXTENT_DIRTY, NULL);
5041                 if (ret)
5042                         break;
5043
5044                 if (btrfs_test_opt(root, DISCARD))
5045                         ret = btrfs_discard_extent(root, start,
5046                                                    end + 1 - start, NULL);
5047
5048                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5049                 unpin_extent_range(root, start, end);
5050                 cond_resched();
5051         }
5052
5053         return 0;
5054 }
5055
5056 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5057                                 struct btrfs_root *root,
5058                                 u64 bytenr, u64 num_bytes, u64 parent,
5059                                 u64 root_objectid, u64 owner_objectid,
5060                                 u64 owner_offset, int refs_to_drop,
5061                                 struct btrfs_delayed_extent_op *extent_op)
5062 {
5063         struct btrfs_key key;
5064         struct btrfs_path *path;
5065         struct btrfs_fs_info *info = root->fs_info;
5066         struct btrfs_root *extent_root = info->extent_root;
5067         struct extent_buffer *leaf;
5068         struct btrfs_extent_item *ei;
5069         struct btrfs_extent_inline_ref *iref;
5070         int ret;
5071         int is_data;
5072         int extent_slot = 0;
5073         int found_extent = 0;
5074         int num_to_del = 1;
5075         u32 item_size;
5076         u64 refs;
5077
5078         path = btrfs_alloc_path();
5079         if (!path)
5080                 return -ENOMEM;
5081
5082         path->reada = 1;
5083         path->leave_spinning = 1;
5084
5085         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5086         BUG_ON(!is_data && refs_to_drop != 1);
5087
5088         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5089                                     bytenr, num_bytes, parent,
5090                                     root_objectid, owner_objectid,
5091                                     owner_offset);
5092         if (ret == 0) {
5093                 extent_slot = path->slots[0];
5094                 while (extent_slot >= 0) {
5095                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5096                                               extent_slot);
5097                         if (key.objectid != bytenr)
5098                                 break;
5099                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5100                             key.offset == num_bytes) {
5101                                 found_extent = 1;
5102                                 break;
5103                         }
5104                         if (path->slots[0] - extent_slot > 5)
5105                                 break;
5106                         extent_slot--;
5107                 }
5108 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5109                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5110                 if (found_extent && item_size < sizeof(*ei))
5111                         found_extent = 0;
5112 #endif
5113                 if (!found_extent) {
5114                         BUG_ON(iref);
5115                         ret = remove_extent_backref(trans, extent_root, path,
5116                                                     NULL, refs_to_drop,
5117                                                     is_data);
5118                         if (ret) {
5119                                 btrfs_abort_transaction(trans, extent_root, ret);
5120                                 goto out;
5121                         }
5122                         btrfs_release_path(path);
5123                         path->leave_spinning = 1;
5124
5125                         key.objectid = bytenr;
5126                         key.type = BTRFS_EXTENT_ITEM_KEY;
5127                         key.offset = num_bytes;
5128
5129                         ret = btrfs_search_slot(trans, extent_root,
5130                                                 &key, path, -1, 1);
5131                         if (ret) {
5132                                 printk(KERN_ERR "umm, got %d back from search"
5133                                        ", was looking for %llu\n", ret,
5134                                        (unsigned long long)bytenr);
5135                                 if (ret > 0)
5136                                         btrfs_print_leaf(extent_root,
5137                                                          path->nodes[0]);
5138                         }
5139                         if (ret < 0) {
5140                                 btrfs_abort_transaction(trans, extent_root, ret);
5141                                 goto out;
5142                         }
5143                         extent_slot = path->slots[0];
5144                 }
5145         } else if (ret == -ENOENT) {
5146                 btrfs_print_leaf(extent_root, path->nodes[0]);
5147                 WARN_ON(1);
5148                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5149                        "parent %llu root %llu  owner %llu offset %llu\n",
5150                        (unsigned long long)bytenr,
5151                        (unsigned long long)parent,
5152                        (unsigned long long)root_objectid,
5153                        (unsigned long long)owner_objectid,
5154                        (unsigned long long)owner_offset);
5155         } else {
5156                 btrfs_abort_transaction(trans, extent_root, ret);
5157                 goto out;
5158         }
5159
5160         leaf = path->nodes[0];
5161         item_size = btrfs_item_size_nr(leaf, extent_slot);
5162 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5163         if (item_size < sizeof(*ei)) {
5164                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5165                 ret = convert_extent_item_v0(trans, extent_root, path,
5166                                              owner_objectid, 0);
5167                 if (ret < 0) {
5168                         btrfs_abort_transaction(trans, extent_root, ret);
5169                         goto out;
5170                 }
5171
5172                 btrfs_release_path(path);
5173                 path->leave_spinning = 1;
5174
5175                 key.objectid = bytenr;
5176                 key.type = BTRFS_EXTENT_ITEM_KEY;
5177                 key.offset = num_bytes;
5178
5179                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5180                                         -1, 1);
5181                 if (ret) {
5182                         printk(KERN_ERR "umm, got %d back from search"
5183                                ", was looking for %llu\n", ret,
5184                                (unsigned long long)bytenr);
5185                         btrfs_print_leaf(extent_root, path->nodes[0]);
5186                 }
5187                 if (ret < 0) {
5188                         btrfs_abort_transaction(trans, extent_root, ret);
5189                         goto out;
5190                 }
5191
5192                 extent_slot = path->slots[0];
5193                 leaf = path->nodes[0];
5194                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5195         }
5196 #endif
5197         BUG_ON(item_size < sizeof(*ei));
5198         ei = btrfs_item_ptr(leaf, extent_slot,
5199                             struct btrfs_extent_item);
5200         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5201                 struct btrfs_tree_block_info *bi;
5202                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5203                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5204                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5205         }
5206
5207         refs = btrfs_extent_refs(leaf, ei);
5208         BUG_ON(refs < refs_to_drop);
5209         refs -= refs_to_drop;
5210
5211         if (refs > 0) {
5212                 if (extent_op)
5213                         __run_delayed_extent_op(extent_op, leaf, ei);
5214                 /*
5215                  * In the case of inline back ref, reference count will
5216                  * be updated by remove_extent_backref
5217                  */
5218                 if (iref) {
5219                         BUG_ON(!found_extent);
5220                 } else {
5221                         btrfs_set_extent_refs(leaf, ei, refs);
5222                         btrfs_mark_buffer_dirty(leaf);
5223                 }
5224                 if (found_extent) {
5225                         ret = remove_extent_backref(trans, extent_root, path,
5226                                                     iref, refs_to_drop,
5227                                                     is_data);
5228                         if (ret) {
5229                                 btrfs_abort_transaction(trans, extent_root, ret);
5230                                 goto out;
5231                         }
5232                 }
5233         } else {
5234                 if (found_extent) {
5235                         BUG_ON(is_data && refs_to_drop !=
5236                                extent_data_ref_count(root, path, iref));
5237                         if (iref) {
5238                                 BUG_ON(path->slots[0] != extent_slot);
5239                         } else {
5240                                 BUG_ON(path->slots[0] != extent_slot + 1);
5241                                 path->slots[0] = extent_slot;
5242                                 num_to_del = 2;
5243                         }
5244                 }
5245
5246                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5247                                       num_to_del);
5248                 if (ret) {
5249                         btrfs_abort_transaction(trans, extent_root, ret);
5250                         goto out;
5251                 }
5252                 btrfs_release_path(path);
5253
5254                 if (is_data) {
5255                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5256                         if (ret) {
5257                                 btrfs_abort_transaction(trans, extent_root, ret);
5258                                 goto out;
5259                         }
5260                 }
5261
5262                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5263                 if (ret) {
5264                         btrfs_abort_transaction(trans, extent_root, ret);
5265                         goto out;
5266                 }
5267         }
5268 out:
5269         btrfs_free_path(path);
5270         return ret;
5271 }
5272
5273 /*
5274  * when we free a block, it is possible (and likely) that we free the last
5275  * delayed ref for that extent as well.  This searches the delayed ref tree for
5276  * a given extent, and if there are no other delayed refs to be processed, it
5277  * removes it from the tree.
5278  */
5279 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5280                                       struct btrfs_root *root, u64 bytenr)
5281 {
5282         struct btrfs_delayed_ref_head *head;
5283         struct btrfs_delayed_ref_root *delayed_refs;
5284         struct btrfs_delayed_ref_node *ref;
5285         struct rb_node *node;
5286         int ret = 0;
5287
5288         delayed_refs = &trans->transaction->delayed_refs;
5289         spin_lock(&delayed_refs->lock);
5290         head = btrfs_find_delayed_ref_head(trans, bytenr);
5291         if (!head)
5292                 goto out;
5293
5294         node = rb_prev(&head->node.rb_node);
5295         if (!node)
5296                 goto out;
5297
5298         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5299
5300         /* there are still entries for this ref, we can't drop it */
5301         if (ref->bytenr == bytenr)
5302                 goto out;
5303
5304         if (head->extent_op) {
5305                 if (!head->must_insert_reserved)
5306                         goto out;
5307                 kfree(head->extent_op);
5308                 head->extent_op = NULL;
5309         }
5310
5311         /*
5312          * waiting for the lock here would deadlock.  If someone else has it
5313          * locked, they are already in the process of dropping it anyway.
5314          */
5315         if (!mutex_trylock(&head->mutex))
5316                 goto out;
5317
5318         /*
5319          * at this point we have a head with no other entries.  Go
5320          * ahead and process it.
5321          */
5322         head->node.in_tree = 0;
5323         rb_erase(&head->node.rb_node, &delayed_refs->root);
5324
5325         delayed_refs->num_entries--;
5326
5327         /*
5328          * we don't take a ref on the node because we're removing it from the
5329          * tree, so we just steal the ref the tree was holding.
5330          */
5331         delayed_refs->num_heads--;
5332         if (list_empty(&head->cluster))
5333                 delayed_refs->num_heads_ready--;
5334
5335         list_del_init(&head->cluster);
5336         spin_unlock(&delayed_refs->lock);
5337
5338         BUG_ON(head->extent_op);
5339         if (head->must_insert_reserved)
5340                 ret = 1;
5341
5342         mutex_unlock(&head->mutex);
5343         btrfs_put_delayed_ref(&head->node);
5344         return ret;
5345 out:
5346         spin_unlock(&delayed_refs->lock);
5347         return 0;
5348 }
5349
5350 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5351                            struct btrfs_root *root,
5352                            struct extent_buffer *buf,
5353                            u64 parent, int last_ref)
5354 {
5355         struct btrfs_block_group_cache *cache = NULL;
5356         int ret;
5357
5358         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5359                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5360                                         buf->start, buf->len,
5361                                         parent, root->root_key.objectid,
5362                                         btrfs_header_level(buf),
5363                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5364                 BUG_ON(ret); /* -ENOMEM */
5365         }
5366
5367         if (!last_ref)
5368                 return;
5369
5370         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5371
5372         if (btrfs_header_generation(buf) == trans->transid) {
5373                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5374                         ret = check_ref_cleanup(trans, root, buf->start);
5375                         if (!ret)
5376                                 goto out;
5377                 }
5378
5379                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5380                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5381                         goto out;
5382                 }
5383
5384                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5385
5386                 btrfs_add_free_space(cache, buf->start, buf->len);
5387                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5388         }
5389 out:
5390         /*
5391          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5392          * anymore.
5393          */
5394         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5395         btrfs_put_block_group(cache);
5396 }
5397
5398 /* Can return -ENOMEM */
5399 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5400                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5401                       u64 owner, u64 offset, int for_cow)
5402 {
5403         int ret;
5404         struct btrfs_fs_info *fs_info = root->fs_info;
5405
5406         /*
5407          * tree log blocks never actually go into the extent allocation
5408          * tree, just update pinning info and exit early.
5409          */
5410         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5411                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5412                 /* unlocks the pinned mutex */
5413                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5414                 ret = 0;
5415         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5416                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5417                                         num_bytes,
5418                                         parent, root_objectid, (int)owner,
5419                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5420         } else {
5421                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5422                                                 num_bytes,
5423                                                 parent, root_objectid, owner,
5424                                                 offset, BTRFS_DROP_DELAYED_REF,
5425                                                 NULL, for_cow);
5426         }
5427         return ret;
5428 }
5429
5430 static u64 stripe_align(struct btrfs_root *root, u64 val)
5431 {
5432         u64 mask = ((u64)root->stripesize - 1);
5433         u64 ret = (val + mask) & ~mask;
5434         return ret;
5435 }
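/*
 * Illustrative example (values assumed): with a 64K stripesize, mask is
 * 0xffff, so stripe_align() maps 0x12345 up to 0x20000 and leaves an
 * already-aligned value such as 0x30000 unchanged.
 */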
5436
5437 /*
5438  * when we wait for progress in the block group caching, it's because
5439  * our allocation attempt failed at least once.  So, we must sleep
5440  * and let some progress happen before we try again.
5441  *
5442  * This function will sleep at least once waiting for new free space to
5443  * show up, and then it will check the block group free space numbers
5444  * for our min num_bytes.  Another option is to have it go ahead
5445  * and look in the rbtree for a free extent of a given size, but this
5446  * is a good start.
5447  */
5448 static noinline int
5449 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5450                                 u64 num_bytes)
5451 {
5452         struct btrfs_caching_control *caching_ctl;
5453         DEFINE_WAIT(wait);
5454
5455         caching_ctl = get_caching_control(cache);
5456         if (!caching_ctl)
5457                 return 0;
5458
5459         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5460                    (cache->free_space_ctl->free_space >= num_bytes));
5461
5462         put_caching_control(caching_ctl);
5463         return 0;
5464 }
5465
5466 static noinline int
5467 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5468 {
5469         struct btrfs_caching_control *caching_ctl;
5470         DEFINE_WAIT(wait);
5471
5472         caching_ctl = get_caching_control(cache);
5473         if (!caching_ctl)
5474                 return 0;
5475
5476         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5477
5478         put_caching_control(caching_ctl);
5479         return 0;
5480 }
5481
5482 int __get_raid_index(u64 flags)
5483 {
5484         int index;
5485
5486         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5487                 index = 0;
5488         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5489                 index = 1;
5490         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5491                 index = 2;
5492         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5493                 index = 3;
5494         else
5495                 index = 4;
5496
5497         return index;
5498 }
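/*
 * The returned index selects which space_info->block_groups[] list
 * find_free_extent() scans; when nothing is found the search falls
 * through the remaining raid types in ascending index order.
 */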
5499
5500 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5501 {
5502         return __get_raid_index(cache->flags);
5503 }
5504
5505 enum btrfs_loop_type {
5506         LOOP_CACHING_NOWAIT = 0,
5507         LOOP_CACHING_WAIT = 1,
5508         LOOP_ALLOC_CHUNK = 2,
5509         LOOP_NO_EMPTY_SIZE = 3,
5510 };
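/*
 * The loop stages above are tried in ascending order by
 * find_free_extent(); each bump of the loop counter relaxes one
 * constraint, from not waiting on caching at all up to retrying with
 * empty_size and empty_cluster forced to zero.
 */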
5511
5512 /*
5513  * walks the btree of allocated extents and finds a hole of a given size.
5514  * The key ins is changed to record the hole:
5515  * ins->objectid == block start
5516  * ins->type == BTRFS_EXTENT_ITEM_KEY
5517  * ins->offset == number of bytes
5518  * Any available blocks before search_start are skipped.
5519  */
5520 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5521                                      struct btrfs_root *orig_root,
5522                                      u64 num_bytes, u64 empty_size,
5523                                      u64 hint_byte, struct btrfs_key *ins,
5524                                      u64 data)
5525 {
5526         int ret = 0;
5527         struct btrfs_root *root = orig_root->fs_info->extent_root;
5528         struct btrfs_free_cluster *last_ptr = NULL;
5529         struct btrfs_block_group_cache *block_group = NULL;
5530         struct btrfs_block_group_cache *used_block_group;
5531         u64 search_start = 0;
5532         int empty_cluster = 2 * 1024 * 1024;
5533         struct btrfs_space_info *space_info;
5534         int loop = 0;
5535         int index = 0;
5536         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5537                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5538         bool found_uncached_bg = false;
5539         bool failed_cluster_refill = false;
5540         bool failed_alloc = false;
5541         bool use_cluster = true;
5542         bool have_caching_bg = false;
5543
5544         WARN_ON(num_bytes < root->sectorsize);
5545         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5546         ins->objectid = 0;
5547         ins->offset = 0;
5548
5549         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5550
5551         space_info = __find_space_info(root->fs_info, data);
5552         if (!space_info) {
5553                 printk(KERN_ERR "No space info for %llu\n", data);
5554                 return -ENOSPC;
5555         }
5556
5557         /*
5558          * If the space info is for both data and metadata it means we have a
5559          * small filesystem and we can't use the clustering stuff.
5560          */
5561         if (btrfs_mixed_space_info(space_info))
5562                 use_cluster = false;
5563
5564         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5565                 last_ptr = &root->fs_info->meta_alloc_cluster;
5566                 if (!btrfs_test_opt(root, SSD))
5567                         empty_cluster = 64 * 1024;
5568         }
5569
5570         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5571             btrfs_test_opt(root, SSD)) {
5572                 last_ptr = &root->fs_info->data_alloc_cluster;
5573         }
5574
5575         if (last_ptr) {
5576                 spin_lock(&last_ptr->lock);
5577                 if (last_ptr->block_group)
5578                         hint_byte = last_ptr->window_start;
5579                 spin_unlock(&last_ptr->lock);
5580         }
5581
5582         search_start = max(search_start, first_logical_byte(root, 0));
5583         search_start = max(search_start, hint_byte);
5584
5585         if (!last_ptr)
5586                 empty_cluster = 0;
5587
5588         if (search_start == hint_byte) {
5589                 block_group = btrfs_lookup_block_group(root->fs_info,
5590                                                        search_start);
5591                 used_block_group = block_group;
5592                 /*
5593                  * we don't want to use the block group if it doesn't match our
5594                  * allocation bits, or if it's not cached.
5595                  *
5596                  * However if we are re-searching with an ideal block group
5597                  * picked out then we don't care that the block group is cached.
5598                  */
5599                 if (block_group && block_group_bits(block_group, data) &&
5600                     block_group->cached != BTRFS_CACHE_NO) {
5601                         down_read(&space_info->groups_sem);
5602                         if (list_empty(&block_group->list) ||
5603                             block_group->ro) {
5604                                 /*
5605                                  * someone is removing this block group,
5606                                  * we can't jump into the have_block_group
5607                                  * target because our list pointers are not
5608                                  * valid
5609                                  */
5610                                 btrfs_put_block_group(block_group);
5611                                 up_read(&space_info->groups_sem);
5612                         } else {
5613                                 index = get_block_group_index(block_group);
5614                                 goto have_block_group;
5615                         }
5616                 } else if (block_group) {
5617                         btrfs_put_block_group(block_group);
5618                 }
5619         }
5620 search:
5621         have_caching_bg = false;
5622         down_read(&space_info->groups_sem);
5623         list_for_each_entry(block_group, &space_info->block_groups[index],
5624                             list) {
5625                 u64 offset;
5626                 int cached;
5627
5628                 used_block_group = block_group;
5629                 btrfs_get_block_group(block_group);
5630                 search_start = block_group->key.objectid;
5631
5632                 /*
5633                  * this can happen if we end up cycling through all the
5634                  * raid types, but we want to make sure we only allocate
5635                  * for the proper type.
5636                  */
5637                 if (!block_group_bits(block_group, data)) {
5638                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5639                                     BTRFS_BLOCK_GROUP_RAID1 |
5640                                     BTRFS_BLOCK_GROUP_RAID10;
5641
5642                         /*
5643                          * if they asked for extra copies and this block group
5644                          * doesn't provide them, bail.  This does allow us to
5645                          * fill raid0 from raid1.
5646                          */
5647                         if ((data & extra) && !(block_group->flags & extra))
5648                                 goto loop;
5649                 }
5650
5651 have_block_group:
5652                 cached = block_group_cache_done(block_group);
5653                 if (unlikely(!cached)) {
5654                         found_uncached_bg = true;
5655                         ret = cache_block_group(block_group, trans,
5656                                                 orig_root, 0);
5657                         BUG_ON(ret < 0);
5658                         ret = 0;
5659                 }
5660
5661                 if (unlikely(block_group->ro))
5662                         goto loop;
5663
5664                 /*
5665                  * Ok we want to try and use the cluster allocator, so
5666                  * let's look there
5667                  */
5668                 if (last_ptr) {
5669                         /*
5670                          * the refill lock keeps out other
5671                          * people trying to start a new cluster
5672                          */
5673                         spin_lock(&last_ptr->refill_lock);
5674                         used_block_group = last_ptr->block_group;
5675                         if (used_block_group != block_group &&
5676                             (!used_block_group ||
5677                              used_block_group->ro ||
5678                              !block_group_bits(used_block_group, data))) {
5679                                 used_block_group = block_group;
5680                                 goto refill_cluster;
5681                         }
5682
5683                         if (used_block_group != block_group)
5684                                 btrfs_get_block_group(used_block_group);
5685
5686                         offset = btrfs_alloc_from_cluster(used_block_group,
5687                           last_ptr, num_bytes, used_block_group->key.objectid);
5688                         if (offset) {
5689                                 /* we have a block, we're done */
5690                                 spin_unlock(&last_ptr->refill_lock);
5691                                 trace_btrfs_reserve_extent_cluster(root,
5692                                         block_group, search_start, num_bytes);
5693                                 goto checks;
5694                         }
5695
5696                         WARN_ON(last_ptr->block_group != used_block_group);
5697                         if (used_block_group != block_group) {
5698                                 btrfs_put_block_group(used_block_group);
5699                                 used_block_group = block_group;
5700                         }
5701 refill_cluster:
5702                         BUG_ON(used_block_group != block_group);
5703                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5704                          * set up a new cluster, so let's just skip it
5705                          * and let the allocator find whatever block
5706                          * it can find.  If we reach this point, we
5707                          * will have tried the cluster allocator
5708                          * plenty of times and not have found
5709                          * anything, so we are likely way too
5710                          * fragmented for the clustering stuff to find
5711                          * anything.
5712                          *
5713                          * However, if the cluster is taken from the
5714                          * current block group, release the cluster
5715                          * first, so that we stand a better chance of
5716                          * succeeding in the unclustered
5717                          * allocation.  */
5718                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5719                             last_ptr->block_group != block_group) {
5720                                 spin_unlock(&last_ptr->refill_lock);
5721                                 goto unclustered_alloc;
5722                         }
5723
5724                         /*
5725                          * this cluster didn't work out, free it and
5726                          * start over
5727                          */
5728                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5729
5730                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5731                                 spin_unlock(&last_ptr->refill_lock);
5732                                 goto unclustered_alloc;
5733                         }
5734
5735                         /* allocate a cluster in this block group */
5736                         ret = btrfs_find_space_cluster(trans, root,
5737                                                block_group, last_ptr,
5738                                                search_start, num_bytes,
5739                                                empty_cluster + empty_size);
5740                         if (ret == 0) {
5741                                 /*
5742                                  * now pull our allocation out of this
5743                                  * cluster
5744                                  */
5745                                 offset = btrfs_alloc_from_cluster(block_group,
5746                                                   last_ptr, num_bytes,
5747                                                   search_start);
5748                                 if (offset) {
5749                                         /* we found one, proceed */
5750                                         spin_unlock(&last_ptr->refill_lock);
5751                                         trace_btrfs_reserve_extent_cluster(root,
5752                                                 block_group, search_start,
5753                                                 num_bytes);
5754                                         goto checks;
5755                                 }
5756                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
5757                                    !failed_cluster_refill) {
5758                                 spin_unlock(&last_ptr->refill_lock);
5759
5760                                 failed_cluster_refill = true;
5761                                 wait_block_group_cache_progress(block_group,
5762                                        num_bytes + empty_cluster + empty_size);
5763                                 goto have_block_group;
5764                         }
5765
5766                         /*
5767                          * at this point we either didn't find a cluster
5768                          * or we weren't able to allocate a block from our
5769                          * cluster.  Free the cluster we've been trying
5770                          * to use, and go to the next block group
5771                          */
5772                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5773                         spin_unlock(&last_ptr->refill_lock);
5774                         goto loop;
5775                 }
5776
5777 unclustered_alloc:
5778                 spin_lock(&block_group->free_space_ctl->tree_lock);
5779                 if (cached &&
5780                     block_group->free_space_ctl->free_space <
5781                     num_bytes + empty_cluster + empty_size) {
5782                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5783                         goto loop;
5784                 }
5785                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5786
5787                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5788                                                     num_bytes, empty_size);
5789                 /*
5790                  * If we didn't find a chunk, and we haven't failed on this
5791                  * block group before, and this block group is in the middle of
5792                  * caching and we are ok with waiting, then go ahead and wait
5793                  * for progress to be made, and set failed_alloc to true.
5794                  *
5795                  * If failed_alloc is true then we've already waited on this
5796                  * block group once and should move on to the next block group.
5797                  */
5798                 if (!offset && !failed_alloc && !cached &&
5799                     loop > LOOP_CACHING_NOWAIT) {
5800                         wait_block_group_cache_progress(block_group,
5801                                                 num_bytes + empty_size);
5802                         failed_alloc = true;
5803                         goto have_block_group;
5804                 } else if (!offset) {
5805                         if (!cached)
5806                                 have_caching_bg = true;
5807                         goto loop;
5808                 }
5809 checks:
5810                 search_start = stripe_align(root, offset);
5811
5812                 /* move on to the next group */
5813                 if (search_start + num_bytes >
5814                     used_block_group->key.objectid + used_block_group->key.offset) {
5815                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5816                         goto loop;
5817                 }
5818
5819                 if (offset < search_start)
5820                         btrfs_add_free_space(used_block_group, offset,
5821                                              search_start - offset);
5822                 BUG_ON(offset > search_start);
5823
5824                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5825                                                   alloc_type);
5826                 if (ret == -EAGAIN) {
5827                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5828                         goto loop;
5829                 }
5830
5831                 /* we are all good, lets return */
5832                 ins->objectid = search_start;
5833                 ins->offset = num_bytes;
5834
5835                 trace_btrfs_reserve_extent(orig_root, block_group,
5836                                            search_start, num_bytes);
5837                 if (used_block_group != block_group)
5838                         btrfs_put_block_group(used_block_group);
5839                 btrfs_put_block_group(block_group);
5840                 break;
5841 loop:
5842                 failed_cluster_refill = false;
5843                 failed_alloc = false;
5844                 BUG_ON(index != get_block_group_index(block_group));
5845                 if (used_block_group != block_group)
5846                         btrfs_put_block_group(used_block_group);
5847                 btrfs_put_block_group(block_group);
5848         }
5849         up_read(&space_info->groups_sem);
5850
5851         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5852                 goto search;
5853
5854         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5855                 goto search;
5856
5857         /*
5858          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5859          *                      caching kthreads as we move along
5860          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5861          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5862          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5863          *                      again
5864          */
5865         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5866                 index = 0;
5867                 loop++;
5868                 if (loop == LOOP_ALLOC_CHUNK) {
5869                         ret = do_chunk_alloc(trans, root, data,
5870                                              CHUNK_ALLOC_FORCE);
5871                         /*
5872                          * Do not bail out on ENOSPC since we
5873                          * can do more things.
5874                          */
5875                         if (ret < 0 && ret != -ENOSPC) {
5876                                 btrfs_abort_transaction(trans,
5877                                                         root, ret);
5878                                 goto out;
5879                         }
5880                 }
5881
5882                 if (loop == LOOP_NO_EMPTY_SIZE) {
5883                         empty_size = 0;
5884                         empty_cluster = 0;
5885                 }
5886
5887                 goto search;
5888         } else if (!ins->objectid) {
5889                 ret = -ENOSPC;
5890         } else {
5891                 ret = 0;
5892         }
5893 out:
5894
5895         return ret;
5896 }
5897
5898 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5899                             int dump_block_groups)
5900 {
5901         struct btrfs_block_group_cache *cache;
5902         int index = 0;
5903
5904         spin_lock(&info->lock);
5905         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5906                (unsigned long long)info->flags,
5907                (unsigned long long)(info->total_bytes - info->bytes_used -
5908                                     info->bytes_pinned - info->bytes_reserved -
5909                                     info->bytes_readonly),
5910                (info->full) ? "" : "not ");
5911         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5912                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5913                (unsigned long long)info->total_bytes,
5914                (unsigned long long)info->bytes_used,
5915                (unsigned long long)info->bytes_pinned,
5916                (unsigned long long)info->bytes_reserved,
5917                (unsigned long long)info->bytes_may_use,
5918                (unsigned long long)info->bytes_readonly);
5919         spin_unlock(&info->lock);
5920
5921         if (!dump_block_groups)
5922                 return;
5923
5924         down_read(&info->groups_sem);
5925 again:
5926         list_for_each_entry(cache, &info->block_groups[index], list) {
5927                 spin_lock(&cache->lock);
5928                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
5929                        (unsigned long long)cache->key.objectid,
5930                        (unsigned long long)cache->key.offset,
5931                        (unsigned long long)btrfs_block_group_used(&cache->item),
5932                        (unsigned long long)cache->pinned,
5933                        (unsigned long long)cache->reserved,
5934                        cache->ro ? "[readonly]" : "");
5935                 btrfs_dump_free_space(cache, bytes);
5936                 spin_unlock(&cache->lock);
5937         }
5938         if (++index < BTRFS_NR_RAID_TYPES)
5939                 goto again;
5940         up_read(&info->groups_sem);
5941 }
5942
5943 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5944                          struct btrfs_root *root,
5945                          u64 num_bytes, u64 min_alloc_size,
5946                          u64 empty_size, u64 hint_byte,
5947                          struct btrfs_key *ins, u64 data)
5948 {
5949         bool final_tried = false;
5950         int ret;
5951
5952         data = btrfs_get_alloc_profile(root, data);
5953 again:
5954         WARN_ON(num_bytes < root->sectorsize);
5955         ret = find_free_extent(trans, root, num_bytes, empty_size,
5956                                hint_byte, ins, data);
5957
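        /*
         * On ENOSPC, retry with a smaller request: halve num_bytes,
         * round down to a sectorsize boundary and clamp to
         * min_alloc_size.  Illustrative: num_bytes 256K with
         * min_alloc_size 64K retries at 128K, then at 64K, which is
         * marked as the final attempt.
         */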
5958         if (ret == -ENOSPC) {
5959                 if (!final_tried) {
5960                         num_bytes = num_bytes >> 1;
5961                         num_bytes = num_bytes & ~(root->sectorsize - 1);
5962                         num_bytes = max(num_bytes, min_alloc_size);
5963                         if (num_bytes == min_alloc_size)
5964                                 final_tried = true;
5965                         goto again;
5966                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
5967                         struct btrfs_space_info *sinfo;
5968
5969                         sinfo = __find_space_info(root->fs_info, data);
5970                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
5971                                "wanted %llu\n", (unsigned long long)data,
5972                                (unsigned long long)num_bytes);
5973                         if (sinfo)
5974                                 dump_space_info(sinfo, num_bytes, 1);
5975                 }
5976         }
5977
5978         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5979
5980         return ret;
5981 }
5982
5983 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5984                                         u64 start, u64 len, int pin)
5985 {
5986         struct btrfs_block_group_cache *cache;
5987         int ret = 0;
5988
5989         cache = btrfs_lookup_block_group(root->fs_info, start);
5990         if (!cache) {
5991                 printk(KERN_ERR "Unable to find block group for %llu\n",
5992                        (unsigned long long)start);
5993                 return -ENOSPC;
5994         }
5995
5996         if (btrfs_test_opt(root, DISCARD))
5997                 ret = btrfs_discard_extent(root, start, len, NULL);
5998
5999         if (pin)
6000                 pin_down_extent(root, cache, start, len, 1);
6001         else {
6002                 btrfs_add_free_space(cache, start, len);
6003                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6004         }
6005         btrfs_put_block_group(cache);
6006
6007         trace_btrfs_reserved_extent_free(root, start, len);
6008
6009         return ret;
6010 }
6011
6012 int btrfs_free_reserved_extent(struct btrfs_root *root,
6013                                         u64 start, u64 len)
6014 {
6015         return __btrfs_free_reserved_extent(root, start, len, 0);
6016 }
6017
6018 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6019                                        u64 start, u64 len)
6020 {
6021         return __btrfs_free_reserved_extent(root, start, len, 1);
6022 }
6023
6024 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6025                                       struct btrfs_root *root,
6026                                       u64 parent, u64 root_objectid,
6027                                       u64 flags, u64 owner, u64 offset,
6028                                       struct btrfs_key *ins, int ref_mod)
6029 {
6030         int ret;
6031         struct btrfs_fs_info *fs_info = root->fs_info;
6032         struct btrfs_extent_item *extent_item;
6033         struct btrfs_extent_inline_ref *iref;
6034         struct btrfs_path *path;
6035         struct extent_buffer *leaf;
6036         int type;
6037         u32 size;
6038
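        /*
         * Item built below (layout recap, not new state): a
         * btrfs_extent_item keyed (objectid EXTENT_ITEM offset),
         * immediately followed by one inline ref -- a shared data ref
         * (keyed by parent bytenr) when parent > 0, otherwise a full
         * extent data ref (root, owner objectid, offset).
         */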
6039         if (parent > 0)
6040                 type = BTRFS_SHARED_DATA_REF_KEY;
6041         else
6042                 type = BTRFS_EXTENT_DATA_REF_KEY;
6043
6044         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6045
6046         path = btrfs_alloc_path();
6047         if (!path)
6048                 return -ENOMEM;
6049
6050         path->leave_spinning = 1;
6051         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6052                                       ins, size);
6053         if (ret) {
6054                 btrfs_free_path(path);
6055                 return ret;
6056         }
6057
6058         leaf = path->nodes[0];
6059         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6060                                      struct btrfs_extent_item);
6061         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6062         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6063         btrfs_set_extent_flags(leaf, extent_item,
6064                                flags | BTRFS_EXTENT_FLAG_DATA);
6065
6066         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6067         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6068         if (parent > 0) {
6069                 struct btrfs_shared_data_ref *ref;
6070                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6071                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6072                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6073         } else {
6074                 struct btrfs_extent_data_ref *ref;
6075                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6076                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6077                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6078                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6079                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6080         }
6081
6082         btrfs_mark_buffer_dirty(path->nodes[0]);
6083         btrfs_free_path(path);
6084
6085         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6086         if (ret) { /* -ENOENT, logic error */
6087                 printk(KERN_ERR "btrfs update block group failed for %llu "
6088                        "%llu\n", (unsigned long long)ins->objectid,
6089                        (unsigned long long)ins->offset);
6090                 BUG();
6091         }
6092         return ret;
6093 }
6094
6095 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6096                                      struct btrfs_root *root,
6097                                      u64 parent, u64 root_objectid,
6098                                      u64 flags, struct btrfs_disk_key *key,
6099                                      int level, struct btrfs_key *ins)
6100 {
6101         int ret;
6102         struct btrfs_fs_info *fs_info = root->fs_info;
6103         struct btrfs_extent_item *extent_item;
6104         struct btrfs_tree_block_info *block_info;
6105         struct btrfs_extent_inline_ref *iref;
6106         struct btrfs_path *path;
6107         struct extent_buffer *leaf;
6108         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6109
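        /*
         * Item built below: a btrfs_extent_item followed by a
         * btrfs_tree_block_info (first key and level of the block) and
         * one inline ref -- SHARED_BLOCK_REF with the parent bytenr for
         * full-backref blocks, TREE_BLOCK_REF with the root objectid
         * otherwise.
         */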
6110         path = btrfs_alloc_path();
6111         if (!path)
6112                 return -ENOMEM;
6113
6114         path->leave_spinning = 1;
6115         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6116                                       ins, size);
6117         if (ret) {
6118                 btrfs_free_path(path);
6119                 return ret;
6120         }
6121
6122         leaf = path->nodes[0];
6123         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6124                                      struct btrfs_extent_item);
6125         btrfs_set_extent_refs(leaf, extent_item, 1);
6126         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6127         btrfs_set_extent_flags(leaf, extent_item,
6128                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6129         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6130
6131         btrfs_set_tree_block_key(leaf, block_info, key);
6132         btrfs_set_tree_block_level(leaf, block_info, level);
6133
6134         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6135         if (parent > 0) {
6136                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6137                 btrfs_set_extent_inline_ref_type(leaf, iref,
6138                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6139                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6140         } else {
6141                 btrfs_set_extent_inline_ref_type(leaf, iref,
6142                                                  BTRFS_TREE_BLOCK_REF_KEY);
6143                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6144         }
6145
6146         btrfs_mark_buffer_dirty(leaf);
6147         btrfs_free_path(path);
6148
6149         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6150         if (ret) { /* -ENOENT, logic error */
6151                 printk(KERN_ERR "btrfs update block group failed for %llu "
6152                        "%llu\n", (unsigned long long)ins->objectid,
6153                        (unsigned long long)ins->offset);
6154                 BUG();
6155         }
6156         return ret;
6157 }
6158
6159 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6160                                      struct btrfs_root *root,
6161                                      u64 root_objectid, u64 owner,
6162                                      u64 offset, struct btrfs_key *ins)
6163 {
6164         int ret;
6165
6166         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6167
6168         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6169                                          ins->offset, 0,
6170                                          root_objectid, owner, offset,
6171                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6172         return ret;
6173 }
6174
6175 /*
6176  * this is used by the tree logging recovery code.  It records that
6177  * an extent has been allocated and makes sure to clear the free
6178  * space cache bits as well
6179  */
6180 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6181                                    struct btrfs_root *root,
6182                                    u64 root_objectid, u64 owner, u64 offset,
6183                                    struct btrfs_key *ins)
6184 {
6185         int ret;
6186         struct btrfs_block_group_cache *block_group;
6187         struct btrfs_caching_control *caching_ctl;
6188         u64 start = ins->objectid;
6189         u64 num_bytes = ins->offset;
6190
6191         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6192         cache_block_group(block_group, trans, NULL, 0);
6193         caching_ctl = get_caching_control(block_group);
6194
6195         if (!caching_ctl) {
6196                 BUG_ON(!block_group_cache_done(block_group));
6197                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6198                 BUG_ON(ret); /* -ENOMEM */
6199         } else {
6200                 mutex_lock(&caching_ctl->mutex);
6201
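                /*
                 * caching_ctl->progress marks how far (by bytenr) the
                 * caching thread has populated free space.  An extent
                 * entirely past progress only needs excluding; one
                 * entirely behind it only needs its free space removed;
                 * one straddling progress gets split and each half is
                 * handled accordingly.
                 */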
6202                 if (start >= caching_ctl->progress) {
6203                         ret = add_excluded_extent(root, start, num_bytes);
6204                         BUG_ON(ret); /* -ENOMEM */
6205                 } else if (start + num_bytes <= caching_ctl->progress) {
6206                         ret = btrfs_remove_free_space(block_group,
6207                                                       start, num_bytes);
6208                         BUG_ON(ret); /* -ENOMEM */
6209                 } else {
6210                         num_bytes = caching_ctl->progress - start;
6211                         ret = btrfs_remove_free_space(block_group,
6212                                                       start, num_bytes);
6213                         BUG_ON(ret); /* -ENOMEM */
6214
6215                         start = caching_ctl->progress;
6216                         num_bytes = ins->objectid + ins->offset -
6217                                     caching_ctl->progress;
6218                         ret = add_excluded_extent(root, start, num_bytes);
6219                         BUG_ON(ret); /* -ENOMEM */
6220                 }
6221
6222                 mutex_unlock(&caching_ctl->mutex);
6223                 put_caching_control(caching_ctl);
6224         }
6225
6226         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6227                                           RESERVE_ALLOC_NO_ACCOUNT);
6228         BUG_ON(ret); /* logic error */
6229         btrfs_put_block_group(block_group);
6230         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6231                                          0, owner, offset, ins, 1);
6232         return ret;
6233 }
6234
6235 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6236                                             struct btrfs_root *root,
6237                                             u64 bytenr, u32 blocksize,
6238                                             int level)
6239 {
6240         struct extent_buffer *buf;
6241
6242         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6243         if (!buf)
6244                 return ERR_PTR(-ENOMEM);
6245         btrfs_set_header_generation(buf, trans->transid);
6246         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6247         btrfs_tree_lock(buf);
6248         clean_tree_block(trans, root, buf);
6249         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6250
6251         btrfs_set_lock_blocking(buf);
6252         btrfs_set_buffer_uptodate(buf);
6253
6254         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6255                 /*
6256                  * we allow two log transactions at a time, use different
6257                  * EXTENT bits to differentiate dirty pages.
6258                  */
6259                 if (root->log_transid % 2 == 0)
6260                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6261                                         buf->start + buf->len - 1, GFP_NOFS);
6262                 else
6263                         set_extent_new(&root->dirty_log_pages, buf->start,
6264                                         buf->start + buf->len - 1, GFP_NOFS);
6265         } else {
6266                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6267                          buf->start + buf->len - 1, GFP_NOFS);
6268         }
6269         trans->blocks_used++;
6270         /* this returns a buffer locked for blocking */
6271         return buf;
6272 }
6273
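/*
 * Pick and charge a reservation for one tree block of the given size.
 * Attempts, in order: the root's usual block rsv, a fresh no-flush
 * metadata reservation, and finally bytes from the global reserve.
 * Returns the rsv that was charged, or an ERR_PTR.
 */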
6274 static struct btrfs_block_rsv *
6275 use_block_rsv(struct btrfs_trans_handle *trans,
6276               struct btrfs_root *root, u32 blocksize)
6277 {
6278         struct btrfs_block_rsv *block_rsv;
6279         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6280         int ret;
6281
6282         block_rsv = get_block_rsv(trans, root);
6283
6284         if (block_rsv->size == 0) {
6285                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6286                                              BTRFS_RESERVE_NO_FLUSH);
6287                 /*
6288                  * If we couldn't reserve metadata bytes try and use some from
6289                  * the global reserve.
6290                  */
6291                 if (ret && block_rsv != global_rsv) {
6292                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6293                         if (!ret)
6294                                 return global_rsv;
6295                         return ERR_PTR(ret);
6296                 } else if (ret) {
6297                         return ERR_PTR(ret);
6298                 }
6299                 return block_rsv;
6300         }
6301
6302         ret = block_rsv_use_bytes(block_rsv, blocksize);
6303         if (!ret)
6304                 return block_rsv;
6305         if (ret && !block_rsv->failfast) {
6306                 static DEFINE_RATELIMIT_STATE(_rs,
6307                                 DEFAULT_RATELIMIT_INTERVAL,
6308                                 /*DEFAULT_RATELIMIT_BURST*/ 2);
6309                 if (__ratelimit(&_rs))
6310                         WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
6311                              ret);
6312                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6313                                              BTRFS_RESERVE_NO_FLUSH);
6314                 if (!ret) {
6315                         return block_rsv;
6316                 } else if (ret && block_rsv != global_rsv) {
6317                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6318                         if (!ret)
6319                                 return global_rsv;
6320                 }
6321         }
6322
6323         return ERR_PTR(-ENOSPC);
6324 }
6325
6326 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6327                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6328 {
6329         block_rsv_add_bytes(block_rsv, blocksize, 0);
6330         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6331 }
6332
6333 /*
6334  * finds a free extent and does all the dirty work required for allocation.
6335  * returns a locked tree buffer for the first block of the extent.
6336  *
6337  * returns the tree buffer or an ERR_PTR on failure.
6339  */
6340 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6341                                         struct btrfs_root *root, u32 blocksize,
6342                                         u64 parent, u64 root_objectid,
6343                                         struct btrfs_disk_key *key, int level,
6344                                         u64 hint, u64 empty_size)
6345 {
6346         struct btrfs_key ins;
6347         struct btrfs_block_rsv *block_rsv;
6348         struct extent_buffer *buf;
6349         u64 flags = 0;
6350         int ret;
6351
6352
6353         block_rsv = use_block_rsv(trans, root, blocksize);
6354         if (IS_ERR(block_rsv))
6355                 return ERR_CAST(block_rsv);
6356
6357         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6358                                    empty_size, hint, &ins, 0);
6359         if (ret) {
6360                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6361                 return ERR_PTR(ret);
6362         }
6363
6364         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6365                                     blocksize, level);
6366         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6367
6368         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6369                 if (parent == 0)
6370                         parent = ins.objectid;
6371                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6372         } else
6373                 BUG_ON(parent > 0);
6374
6375         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6376                 struct btrfs_delayed_extent_op *extent_op;
6377                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6378                 BUG_ON(!extent_op); /* -ENOMEM */
6379                 if (key)
6380                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6381                 else
6382                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6383                 extent_op->flags_to_set = flags;
6384                 extent_op->update_key = 1;
6385                 extent_op->update_flags = 1;
6386                 extent_op->is_data = 0;
6387
6388                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6389                                         ins.objectid,
6390                                         ins.offset, parent, root_objectid,
6391                                         level, BTRFS_ADD_DELAYED_EXTENT,
6392                                         extent_op, 0);
6393                 BUG_ON(ret); /* -ENOMEM */
6394         }
6395         return buf;
6396 }
6397
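/*
 * State threaded through the tree walk used when dropping a snapshot:
 * refs[] and flags[] cache the extent refcount and flags per level,
 * stage is DROP_REFERENCE or UPDATE_BACKREF, shared_level remembers
 * where a shared subtree begins, and reada_slot/reada_count drive the
 * readahead in reada_walk_down().
 */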
6398 struct walk_control {
6399         u64 refs[BTRFS_MAX_LEVEL];
6400         u64 flags[BTRFS_MAX_LEVEL];
6401         struct btrfs_key update_progress;
6402         int stage;
6403         int level;
6404         int shared_level;
6405         int update_ref;
6406         int keep_locks;
6407         int reada_slot;
6408         int reada_count;
6409         int for_reloc;
6410 };
6411
6412 #define DROP_REFERENCE  1
6413 #define UPDATE_BACKREF  2
6414
6415 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6416                                      struct btrfs_root *root,
6417                                      struct walk_control *wc,
6418                                      struct btrfs_path *path)
6419 {
6420         u64 bytenr;
6421         u64 generation;
6422         u64 refs;
6423         u64 flags;
6424         u32 nritems;
6425         u32 blocksize;
6426         struct btrfs_key key;
6427         struct extent_buffer *eb;
6428         int ret;
6429         int slot;
6430         int nread = 0;
6431
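        /*
         * Adapt the readahead window to how the walk is progressing:
         * shrink it by a third while the walk has not yet caught up to
         * the last readahead slot, grow it by half otherwise, clamped
         * to [2, BTRFS_NODEPTRS_PER_BLOCK].  Illustrative: a count of 8
         * becomes 5 on shrink and 12 on grow.
         */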
6432         if (path->slots[wc->level] < wc->reada_slot) {
6433                 wc->reada_count = wc->reada_count * 2 / 3;
6434                 wc->reada_count = max(wc->reada_count, 2);
6435         } else {
6436                 wc->reada_count = wc->reada_count * 3 / 2;
6437                 wc->reada_count = min_t(int, wc->reada_count,
6438                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6439         }
6440
6441         eb = path->nodes[wc->level];
6442         nritems = btrfs_header_nritems(eb);
6443         blocksize = btrfs_level_size(root, wc->level - 1);
6444
6445         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6446                 if (nread >= wc->reada_count)
6447                         break;
6448
6449                 cond_resched();
6450                 bytenr = btrfs_node_blockptr(eb, slot);
6451                 generation = btrfs_node_ptr_generation(eb, slot);
6452
6453                 if (slot == path->slots[wc->level])
6454                         goto reada;
6455
6456                 if (wc->stage == UPDATE_BACKREF &&
6457                     generation <= root->root_key.offset)
6458                         continue;
6459
6460                 /* We don't lock the tree block, it's OK to be racy here */
6461                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6462                                                &refs, &flags);
6463                 /* We don't care about errors in readahead. */
6464                 if (ret < 0)
6465                         continue;
6466                 BUG_ON(refs == 0);
6467
6468                 if (wc->stage == DROP_REFERENCE) {
6469                         if (refs == 1)
6470                                 goto reada;
6471
6472                         if (wc->level == 1 &&
6473                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6474                                 continue;
6475                         if (!wc->update_ref ||
6476                             generation <= root->root_key.offset)
6477                                 continue;
6478                         btrfs_node_key_to_cpu(eb, &key, slot);
6479                         ret = btrfs_comp_cpu_keys(&key,
6480                                                   &wc->update_progress);
6481                         if (ret < 0)
6482                                 continue;
6483                 } else {
6484                         if (wc->level == 1 &&
6485                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6486                                 continue;
6487                 }
6488 reada:
6489                 ret = readahead_tree_block(root, bytenr, blocksize,
6490                                            generation);
6491                 if (ret)
6492                         break;
6493                 nread++;
6494         }
6495         wc->reada_slot = slot;
6496 }
6497
6498 /*
6499  * helper to process a tree block while walking down the tree.
6500  *
6501  * when wc->stage == UPDATE_BACKREF, this function updates
6502  * back refs for pointers in the block.
6503  *
6504  * NOTE: return value 1 means we should stop walking down.
6505  */
6506 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6507                                    struct btrfs_root *root,
6508                                    struct btrfs_path *path,
6509                                    struct walk_control *wc, int lookup_info)
6510 {
6511         int level = wc->level;
6512         struct extent_buffer *eb = path->nodes[level];
6513         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6514         int ret;
6515
6516         if (wc->stage == UPDATE_BACKREF &&
6517             btrfs_header_owner(eb) != root->root_key.objectid)
6518                 return 1;
6519
6520         /*
6521          * when the reference count of a tree block is 1, it won't increase
6522          * again. once the full backref flag is set, we never clear it.
6523          */
6524         if (lookup_info &&
6525             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6526              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6527                 BUG_ON(!path->locks[level]);
6528                 ret = btrfs_lookup_extent_info(trans, root,
6529                                                eb->start, eb->len,
6530                                                &wc->refs[level],
6531                                                &wc->flags[level]);
6532                 BUG_ON(ret == -ENOMEM);
6533                 if (ret)
6534                         return ret;
6535                 BUG_ON(wc->refs[level] == 0);
6536         }
6537
6538         if (wc->stage == DROP_REFERENCE) {
6539                 if (wc->refs[level] > 1)
6540                         return 1;
6541
6542                 if (path->locks[level] && !wc->keep_locks) {
6543                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6544                         path->locks[level] = 0;
6545                 }
6546                 return 0;
6547         }
6548
6549         /* wc->stage == UPDATE_BACKREF */
6550         if (!(wc->flags[level] & flag)) {
6551                 BUG_ON(!path->locks[level]);
6552                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6553                 BUG_ON(ret); /* -ENOMEM */
6554                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6555                 BUG_ON(ret); /* -ENOMEM */
6556                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6557                                                   eb->len, flag, 0);
6558                 BUG_ON(ret); /* -ENOMEM */
6559                 wc->flags[level] |= flag;
6560         }
6561
6562         /*
6563          * the block is shared by multiple trees, so it's not good to
6564          * keep the tree lock
6565          */
6566         if (path->locks[level] && level > 0) {
6567                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6568                 path->locks[level] = 0;
6569         }
6570         return 0;
6571 }
6572
6573 /*
6574  * helper to process a tree block pointer.
6575  *
6576  * when wc->stage == DROP_REFERENCE, this function checks
6577  * the reference count of the block pointed to. if the block
6578  * is shared and we need to update back refs for the subtree
6579  * rooted at the block, this function changes wc->stage to
6580  * UPDATE_BACKREF. if the block is shared and there is no
6581  * need to update back refs, this function drops the reference
6582  * to the block.
6583  *
6584  * NOTE: return value 1 means we should stop walking down.
6585  */
6586 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6587                                  struct btrfs_root *root,
6588                                  struct btrfs_path *path,
6589                                  struct walk_control *wc, int *lookup_info)
6590 {
6591         u64 bytenr;
6592         u64 generation;
6593         u64 parent;
6594         u32 blocksize;
6595         struct btrfs_key key;
6596         struct extent_buffer *next;
6597         int level = wc->level;
6598         int reada = 0;
6599         int ret = 0;
6600
6601         generation = btrfs_node_ptr_generation(path->nodes[level],
6602                                                path->slots[level]);
6603         /*
6604          * if the lower level block was created before the snapshot
6605          * was created, we know there is no need to update back refs
6606          * for the subtree
6607          */
6608         if (wc->stage == UPDATE_BACKREF &&
6609             generation <= root->root_key.offset) {
6610                 *lookup_info = 1;
6611                 return 1;
6612         }
6613
6614         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6615         blocksize = btrfs_level_size(root, level - 1);
6616
6617         next = btrfs_find_tree_block(root, bytenr, blocksize);
6618         if (!next) {
6619                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6620                 if (!next)
6621                         return -ENOMEM;
6622                 reada = 1;
6623         }
6624         btrfs_tree_lock(next);
6625         btrfs_set_lock_blocking(next);
6626
6627         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6628                                        &wc->refs[level - 1],
6629                                        &wc->flags[level - 1]);
6630         if (ret < 0) {
6631                 btrfs_tree_unlock(next);
6632                 return ret;
6633         }
6634
6635         BUG_ON(wc->refs[level - 1] == 0);
6636         *lookup_info = 0;
6637
6638         if (wc->stage == DROP_REFERENCE) {
6639                 if (wc->refs[level - 1] > 1) {
6640                         if (level == 1 &&
6641                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6642                                 goto skip;
6643
6644                         if (!wc->update_ref ||
6645                             generation <= root->root_key.offset)
6646                                 goto skip;
6647
6648                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6649                                               path->slots[level]);
6650                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6651                         if (ret < 0)
6652                                 goto skip;
6653
6654                         wc->stage = UPDATE_BACKREF;
6655                         wc->shared_level = level - 1;
6656                 }
6657         } else {
6658                 if (level == 1 &&
6659                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6660                         goto skip;
6661         }
6662
6663         if (!btrfs_buffer_uptodate(next, generation, 0)) {
6664                 btrfs_tree_unlock(next);
6665                 free_extent_buffer(next);
6666                 next = NULL;
6667                 *lookup_info = 1;
6668         }
6669
6670         if (!next) {
6671                 if (reada && level == 1)
6672                         reada_walk_down(trans, root, wc, path);
6673                 next = read_tree_block(root, bytenr, blocksize, generation);
6674                 if (!next)
6675                         return -EIO;
6676                 btrfs_tree_lock(next);
6677                 btrfs_set_lock_blocking(next);
6678         }
6679
6680         level--;
6681         BUG_ON(level != btrfs_header_level(next));
6682         path->nodes[level] = next;
6683         path->slots[level] = 0;
6684         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6685         wc->level = level;
6686         if (wc->level == 1)
6687                 wc->reada_slot = 0;
6688         return 0;
6689 skip:
6690         wc->refs[level - 1] = 0;
6691         wc->flags[level - 1] = 0;
6692         if (wc->stage == DROP_REFERENCE) {
6693                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6694                         parent = path->nodes[level]->start;
6695                 } else {
6696                         BUG_ON(root->root_key.objectid !=
6697                                btrfs_header_owner(path->nodes[level]));
6698                         parent = 0;
6699                 }
6700
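                     /*
                      * we are not descending into this subtree, so drop
                      * the reference this root holds on the child block
                      */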
6701                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6702                                 root->root_key.objectid, level - 1, 0, 0);
6703                 BUG_ON(ret); /* -ENOMEM */
6704         }
6705         btrfs_tree_unlock(next);
6706         free_extent_buffer(next);
6707         *lookup_info = 1;
6708         return 1;
6709 }
6710
6711 /*
6712  * helper to process tree block while walking up the tree.
6713  *
6714  * when wc->stage == DROP_REFERENCE, this function drops
6715  * reference count on the block.
6716  *
6717  * when wc->stage == UPDATE_BACKREF, this function changes
6718  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6719  * to UPDATE_BACKREF previously while processing the block.
6720  *
6721  * NOTE: return value 1 means we should stop walking up.
6722  */
6723 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6724                                  struct btrfs_root *root,
6725                                  struct btrfs_path *path,
6726                                  struct walk_control *wc)
6727 {
6728         int ret;
6729         int level = wc->level;
6730         struct extent_buffer *eb = path->nodes[level];
6731         u64 parent = 0;
6732
6733         if (wc->stage == UPDATE_BACKREF) {
6734                 BUG_ON(wc->shared_level < level);
6735                 if (level < wc->shared_level)
6736                         goto out;
6737
6738                 ret = find_next_key(path, level + 1, &wc->update_progress);
6739                 if (ret > 0)
6740                         wc->update_ref = 0;
6741
6742                 wc->stage = DROP_REFERENCE;
6743                 wc->shared_level = -1;
6744                 path->slots[level] = 0;
6745
6746                 /*
6747                  * check reference count again if the block isn't locked.
6748                  * we should start walking down the tree again if reference
6749                  * count is one.
6750                  */
6751                 if (!path->locks[level]) {
6752                         BUG_ON(level == 0);
6753                         btrfs_tree_lock(eb);
6754                         btrfs_set_lock_blocking(eb);
6755                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6756
6757                         ret = btrfs_lookup_extent_info(trans, root,
6758                                                        eb->start, eb->len,
6759                                                        &wc->refs[level],
6760                                                        &wc->flags[level]);
6761                         if (ret < 0) {
6762                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6763                                 return ret;
6764                         }
6765                         BUG_ON(wc->refs[level] == 0);
6766                         if (wc->refs[level] == 1) {
6767                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6768                                 return 1;
6769                         }
6770                 }
6771         }
6772
6773         /* wc->stage == DROP_REFERENCE */
6774         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6775
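             /*
              * if ours is the last reference: for a leaf, drop the refs
              * it holds on its file extents, then clean the dirty block
              * and free it below
              */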
6776         if (wc->refs[level] == 1) {
6777                 if (level == 0) {
6778                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6779                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6780                                                     wc->for_reloc);
6781                         else
6782                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6783                                                     wc->for_reloc);
6784                         BUG_ON(ret); /* -ENOMEM */
6785                 }
6786                 /* make block locked assertion in clean_tree_block happy */
6787                 if (!path->locks[level] &&
6788                     btrfs_header_generation(eb) == trans->transid) {
6789                         btrfs_tree_lock(eb);
6790                         btrfs_set_lock_blocking(eb);
6791                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6792                 }
6793                 clean_tree_block(trans, root, eb);
6794         }
6795
6796         if (eb == root->node) {
6797                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6798                         parent = eb->start;
6799                 else
6800                         BUG_ON(root->root_key.objectid !=
6801                                btrfs_header_owner(eb));
6802         } else {
6803                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6804                         parent = path->nodes[level + 1]->start;
6805                 else
6806                         BUG_ON(root->root_key.objectid !=
6807                                btrfs_header_owner(path->nodes[level + 1]));
6808         }
6809
6810         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6811 out:
6812         wc->refs[level] = 0;
6813         wc->flags[level] = 0;
6814         return 0;
6815 }
6816
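     /*
      * walk down the tree one level at a time, processing each block
      * with walk_down_proc() and do_walk_down() until we hit a leaf or
      * are told to stop descending
      */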
6817 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6818                                    struct btrfs_root *root,
6819                                    struct btrfs_path *path,
6820                                    struct walk_control *wc)
6821 {
6822         int level = wc->level;
6823         int lookup_info = 1;
6824         int ret;
6825
6826         while (level >= 0) {
6827                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6828                 if (ret > 0)
6829                         break;
6830
6831                 if (level == 0)
6832                         break;
6833
6834                 if (path->slots[level] >=
6835                     btrfs_header_nritems(path->nodes[level]))
6836                         break;
6837
6838                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6839                 if (ret > 0) {
6840                         path->slots[level]++;
6841                         continue;
6842                 } else if (ret < 0)
6843                         return ret;
6844                 level = wc->level;
6845         }
6846         return 0;
6847 }
6848
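     /*
      * walk back up the tree, advancing to the next slot at each level,
      * or processing an exhausted node with walk_up_proc() and releasing
      * it before moving one level up
      */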
6849 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6850                                  struct btrfs_root *root,
6851                                  struct btrfs_path *path,
6852                                  struct walk_control *wc, int max_level)
6853 {
6854         int level = wc->level;
6855         int ret;
6856
6857         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6858         while (level < max_level && path->nodes[level]) {
6859                 wc->level = level;
6860                 if (path->slots[level] + 1 <
6861                     btrfs_header_nritems(path->nodes[level])) {
6862                         path->slots[level]++;
6863                         return 0;
6864                 } else {
6865                         ret = walk_up_proc(trans, root, path, wc);
6866                         if (ret > 0)
6867                                 return 0;
6868
6869                         if (path->locks[level]) {
6870                                 btrfs_tree_unlock_rw(path->nodes[level],
6871                                                      path->locks[level]);
6872                                 path->locks[level] = 0;
6873                         }
6874                         free_extent_buffer(path->nodes[level]);
6875                         path->nodes[level] = NULL;
6876                         level++;
6877                 }
6878         }
6879         return 1;
6880 }
6881
6882 /*
6883  * drop a subvolume tree.
6884  *
6885  * this function traverses the tree freeing any blocks that are
6886  * only referenced by the tree.
6887  *
6888  * when a shared tree block is found, this function decreases its
6889  * reference count by one. if update_ref is true, this function
6890  * also makes sure backrefs for the shared block and all lower level
6891  * blocks are properly updated.
6892  */
6893 int btrfs_drop_snapshot(struct btrfs_root *root,
6894                          struct btrfs_block_rsv *block_rsv, int update_ref,
6895                          int for_reloc)
6896 {
6897         struct btrfs_path *path;
6898         struct btrfs_trans_handle *trans;
6899         struct btrfs_root *tree_root = root->fs_info->tree_root;
6900         struct btrfs_root_item *root_item = &root->root_item;
6901         struct walk_control *wc;
6902         struct btrfs_key key;
6903         int err = 0;
6904         int ret;
6905         int level;
6906
6907         path = btrfs_alloc_path();
6908         if (!path) {
6909                 err = -ENOMEM;
6910                 goto out;
6911         }
6912
6913         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6914         if (!wc) {
6915                 btrfs_free_path(path);
6916                 err = -ENOMEM;
6917                 goto out;
6918         }
6919
6920         trans = btrfs_start_transaction(tree_root, 0);
6921         if (IS_ERR(trans)) {
6922                 err = PTR_ERR(trans);
6923                 goto out_free;
6924         }
6925
6926         if (block_rsv)
6927                 trans->block_rsv = block_rsv;
6928
6929         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6930                 level = btrfs_header_level(root->node);
6931                 path->nodes[level] = btrfs_lock_root_node(root);
6932                 btrfs_set_lock_blocking(path->nodes[level]);
6933                 path->slots[level] = 0;
6934                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6935                 memset(&wc->update_progress, 0,
6936                        sizeof(wc->update_progress));
6937         } else {
6938                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6939                 memcpy(&wc->update_progress, &key,
6940                        sizeof(wc->update_progress));
6941
6942                 level = root_item->drop_level;
6943                 BUG_ON(level == 0);
6944                 path->lowest_level = level;
6945                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6946                 path->lowest_level = 0;
6947                 if (ret < 0) {
6948                         err = ret;
6949                         goto out_end_trans;
6950                 }
6951                 WARN_ON(ret > 0);
6952
6953                 /*
6954                  * unlock our path, this is safe because only this
6955                  * function is allowed to delete this snapshot
6956                  */
6957                 btrfs_unlock_up_safe(path, 0);
6958
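                     /*
                      * walk from the root down to the saved drop_level,
                      * looking up refs and flags along the way so the walk
                      * control state matches where the previous pass
                      * stopped
                      */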
6959                 level = btrfs_header_level(root->node);
6960                 while (1) {
6961                         btrfs_tree_lock(path->nodes[level]);
6962                         btrfs_set_lock_blocking(path->nodes[level]);
6963
6964                         ret = btrfs_lookup_extent_info(trans, root,
6965                                                 path->nodes[level]->start,
6966                                                 path->nodes[level]->len,
6967                                                 &wc->refs[level],
6968                                                 &wc->flags[level]);
6969                         if (ret < 0) {
6970                                 err = ret;
6971                                 goto out_end_trans;
6972                         }
6973                         BUG_ON(wc->refs[level] == 0);
6974
6975                         if (level == root_item->drop_level)
6976                                 break;
6977
6978                         btrfs_tree_unlock(path->nodes[level]);
6979                         WARN_ON(wc->refs[level] != 1);
6980                         level--;
6981                 }
6982         }
6983
6984         wc->level = level;
6985         wc->shared_level = -1;
6986         wc->stage = DROP_REFERENCE;
6987         wc->update_ref = update_ref;
6988         wc->keep_locks = 0;
6989         wc->for_reloc = for_reloc;
6990         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6991
6992         while (1) {
6993                 ret = walk_down_tree(trans, root, path, wc);
6994                 if (ret < 0) {
6995                         err = ret;
6996                         break;
6997                 }
6998
6999                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7000                 if (ret < 0) {
7001                         err = ret;
7002                         break;
7003                 }
7004
7005                 if (ret > 0) {
7006                         BUG_ON(wc->stage != DROP_REFERENCE);
7007                         break;
7008                 }
7009
7010                 if (wc->stage == DROP_REFERENCE) {
7011                         level = wc->level;
7012                         btrfs_node_key(path->nodes[level],
7013                                        &root_item->drop_progress,
7014                                        path->slots[level]);
7015                         root_item->drop_level = level;
7016                 }
7017
7018                 BUG_ON(wc->level == 0);
7019                 if (btrfs_should_end_transaction(trans, tree_root)) {
7020                         ret = btrfs_update_root(trans, tree_root,
7021                                                 &root->root_key,
7022                                                 root_item);
7023                         if (ret) {
7024                                 btrfs_abort_transaction(trans, tree_root, ret);
7025                                 err = ret;
7026                                 goto out_end_trans;
7027                         }
7028
7029                         btrfs_end_transaction_throttle(trans, tree_root);
7030                         trans = btrfs_start_transaction(tree_root, 0);
7031                         if (IS_ERR(trans)) {
7032                                 err = PTR_ERR(trans);
7033                                 goto out_free;
7034                         }
7035                         if (block_rsv)
7036                                 trans->block_rsv = block_rsv;
7037                 }
7038         }
7039         btrfs_release_path(path);
7040         if (err)
7041                 goto out_end_trans;
7042
7043         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7044         if (ret) {
7045                 btrfs_abort_transaction(trans, tree_root, ret);
7046                 goto out_end_trans;
7047         }
7048
7049         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7050                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7051                                            NULL, NULL);
7052                 if (ret < 0) {
7053                         btrfs_abort_transaction(trans, tree_root, ret);
7054                         err = ret;
7055                         goto out_end_trans;
7056                 } else if (ret > 0) {
7057                         /* if we fail to delete the orphan item this time
7058                          * around, it'll get picked up the next time.
7059                          *
7060                          * The most common failure here is just -ENOENT.
7061                          */
7062                         btrfs_del_orphan_item(trans, tree_root,
7063                                               root->root_key.objectid);
7064                 }
7065         }
7066
7067         if (root->in_radix) {
7068                 btrfs_free_fs_root(tree_root->fs_info, root);
7069         } else {
7070                 free_extent_buffer(root->node);
7071                 free_extent_buffer(root->commit_root);
7072                 kfree(root);
7073         }
7074 out_end_trans:
7075         btrfs_end_transaction_throttle(trans, tree_root);
7076 out_free:
7077         kfree(wc);
7078         btrfs_free_path(path);
7079 out:
7080         if (err)
7081                 btrfs_std_error(root->fs_info, err);
7082         return err;
7083 }
7084
7085 /*
7086  * drop subtree rooted at tree block 'node'.
7087  *
7088  * NOTE: this function will unlock and release tree block 'node'
7089  * only used by relocation code
7090  */
7091 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7092                         struct btrfs_root *root,
7093                         struct extent_buffer *node,
7094                         struct extent_buffer *parent)
7095 {
7096         struct btrfs_path *path;
7097         struct walk_control *wc;
7098         int level;
7099         int parent_level;
7100         int ret = 0;
7101         int wret;
7102
7103         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7104
7105         path = btrfs_alloc_path();
7106         if (!path)
7107                 return -ENOMEM;
7108
7109         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7110         if (!wc) {
7111                 btrfs_free_path(path);
7112                 return -ENOMEM;
7113         }
7114
7115         btrfs_assert_tree_locked(parent);
7116         parent_level = btrfs_header_level(parent);
7117         extent_buffer_get(parent);
7118         path->nodes[parent_level] = parent;
7119         path->slots[parent_level] = btrfs_header_nritems(parent);
7120
7121         btrfs_assert_tree_locked(node);
7122         level = btrfs_header_level(node);
7123         path->nodes[level] = node;
7124         path->slots[level] = 0;
7125         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7126
7127         wc->refs[parent_level] = 1;
7128         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7129         wc->level = level;
7130         wc->shared_level = -1;
7131         wc->stage = DROP_REFERENCE;
7132         wc->update_ref = 0;
7133         wc->keep_locks = 1;
7134         wc->for_reloc = 1;
7135         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7136
7137         while (1) {
7138                 wret = walk_down_tree(trans, root, path, wc);
7139                 if (wret < 0) {
7140                         ret = wret;
7141                         break;
7142                 }
7143
7144                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7145                 if (wret < 0)
7146                         ret = wret;
7147                 if (wret != 0)
7148                         break;
7149         }
7150
7151         kfree(wc);
7152         btrfs_free_path(path);
7153         return ret;
7154 }
7155
7156 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7157 {
7158         u64 num_devices;
7159         u64 stripped;
7160
7161         /*
7162          * if restripe for this chunk_type is on pick target profile and
7163          * return, otherwise do the usual balance
7164          */
7165         stripped = get_restripe_target(root->fs_info, flags);
7166         if (stripped)
7167                 return extended_to_chunk(stripped);
7168
7169         /*
7170          * we add in the count of missing devices because we want
7171          * to make sure that any RAID levels on a degraded FS
7172          * continue to be honored.
7173          */
7174         num_devices = root->fs_info->fs_devices->rw_devices +
7175                 root->fs_info->fs_devices->missing_devices;
7176
7177         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7178                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7179
7180         if (num_devices == 1) {
7181                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7182                 stripped = flags & ~stripped;
7183
7184                 /* turn raid0 into single device chunks */
7185                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7186                         return stripped;
7187
7188                 /* turn mirroring into duplication */
7189                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7190                              BTRFS_BLOCK_GROUP_RAID10))
7191                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7192         } else {
7193                 /* they already had raid on here, just return */
7194                 if (flags & stripped)
7195                         return flags;
7196
7197                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7198                 stripped = flags & ~stripped;
7199
7200                 /* switch duplicated blocks with raid1 */
7201                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7202                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7203
7204                 /* this is drive concat, leave it alone */
7205         }
7206
7207         return flags;
7208 }
7209
7210 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7211 {
7212         struct btrfs_space_info *sinfo = cache->space_info;
7213         u64 num_bytes;
7214         u64 min_allocable_bytes;
7215         int ret = -ENOSPC;
7216
7217
7218         /*
7219          * We need some metadata space and system metadata space for
7220          * allocating chunks in some corner cases, so keep a minimum
7221          * amount allocatable unless the caller forces the group readonly.
7222          */
7223         if ((sinfo->flags &
7224              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7225             !force)
7226                 min_allocable_bytes = 1 * 1024 * 1024;
7227         else
7228                 min_allocable_bytes = 0;
7229
7230         spin_lock(&sinfo->lock);
7231         spin_lock(&cache->lock);
7232
7233         if (cache->ro) {
7234                 ret = 0;
7235                 goto out;
7236         }
7237
7238         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7239                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7240
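             /*
              * num_bytes is the free space still left in this group; it
              * can go read-only only if accounting all of it as readonly
              * still leaves min_allocable_bytes available in the space_info
              */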
7241         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7242             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7243             min_allocable_bytes <= sinfo->total_bytes) {
7244                 sinfo->bytes_readonly += num_bytes;
7245                 cache->ro = 1;
7246                 ret = 0;
7247         }
7248 out:
7249         spin_unlock(&cache->lock);
7250         spin_unlock(&sinfo->lock);
7251         return ret;
7252 }
7253
7254 int btrfs_set_block_group_ro(struct btrfs_root *root,
7255                              struct btrfs_block_group_cache *cache)
7256
7257 {
7258         struct btrfs_trans_handle *trans;
7259         u64 alloc_flags;
7260         int ret;
7261
7262         BUG_ON(cache->ro);
7263
7264         trans = btrfs_join_transaction(root);
7265         if (IS_ERR(trans))
7266                 return PTR_ERR(trans);
7267
7268         alloc_flags = update_block_group_flags(root, cache->flags);
7269         if (alloc_flags != cache->flags) {
7270                 ret = do_chunk_alloc(trans, root, alloc_flags,
7271                                      CHUNK_ALLOC_FORCE);
7272                 if (ret < 0)
7273                         goto out;
7274         }
7275
7276         ret = set_block_group_ro(cache, 0);
7277         if (!ret)
7278                 goto out;
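             /*
              * not enough free space to mark the group read-only: force
              * allocation of a new chunk for this profile and retry once
              */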
7279         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7280         ret = do_chunk_alloc(trans, root, alloc_flags,
7281                              CHUNK_ALLOC_FORCE);
7282         if (ret < 0)
7283                 goto out;
7284         ret = set_block_group_ro(cache, 0);
7285 out:
7286         btrfs_end_transaction(trans, root);
7287         return ret;
7288 }
7289
7290 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7291                             struct btrfs_root *root, u64 type)
7292 {
7293         u64 alloc_flags = get_alloc_profile(root, type);
7294         return do_chunk_alloc(trans, root, alloc_flags,
7295                               CHUNK_ALLOC_FORCE);
7296 }
7297
7298 /*
7299  * helper to account the unused space of all the readonly block groups
7300  * in the list. takes mirrors into account.
7301  */
7302 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7303 {
7304         struct btrfs_block_group_cache *block_group;
7305         u64 free_bytes = 0;
7306         int factor;
7307
7308         list_for_each_entry(block_group, groups_list, list) {
7309                 spin_lock(&block_group->lock);
7310
7311                 if (!block_group->ro) {
7312                         spin_unlock(&block_group->lock);
7313                         continue;
7314                 }
7315
7316                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7317                                           BTRFS_BLOCK_GROUP_RAID10 |
7318                                           BTRFS_BLOCK_GROUP_DUP))
7319                         factor = 2;
7320                 else
7321                         factor = 1;
7322
7323                 free_bytes += (block_group->key.offset -
7324                                btrfs_block_group_used(&block_group->item)) *
7325                                factor;
7326
7327                 spin_unlock(&block_group->lock);
7328         }
7329
7330         return free_bytes;
7331 }
7332
7333 /*
7334  * helper to account the unused space of all the readonly block groups
7335  * in the space_info. takes mirrors into account.
7336  */
7337 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7338 {
7339         int i;
7340         u64 free_bytes = 0;
7341
7342         spin_lock(&sinfo->lock);
7343
7344         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7345                 if (!list_empty(&sinfo->block_groups[i]))
7346                         free_bytes += __btrfs_get_ro_block_group_free_space(
7347                                                 &sinfo->block_groups[i]);
7348
7349         spin_unlock(&sinfo->lock);
7350
7351         return free_bytes;
7352 }
7353
7354 void btrfs_set_block_group_rw(struct btrfs_root *root,
7355                               struct btrfs_block_group_cache *cache)
7356 {
7357         struct btrfs_space_info *sinfo = cache->space_info;
7358         u64 num_bytes;
7359
7360         BUG_ON(!cache->ro);
7361
7362         spin_lock(&sinfo->lock);
7363         spin_lock(&cache->lock);
7364         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7365                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7366         sinfo->bytes_readonly -= num_bytes;
7367         cache->ro = 0;
7368         spin_unlock(&cache->lock);
7369         spin_unlock(&sinfo->lock);
7370 }
7371
7372 /*
7373  * checks to see if it's even possible to relocate this block group.
7374  *
7375  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7376  * it's ok to go ahead and try.
7377  */
7378 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7379 {
7380         struct btrfs_block_group_cache *block_group;
7381         struct btrfs_space_info *space_info;
7382         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7383         struct btrfs_device *device;
7384         u64 min_free;
7385         u64 dev_min = 1;
7386         u64 dev_nr = 0;
7387         u64 target;
7388         int index;
7389         int full = 0;
7390         int ret = 0;
7391
7392         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7393
7394         /* odd, couldn't find the block group, leave it alone */
7395         if (!block_group)
7396                 return -1;
7397
7398         min_free = btrfs_block_group_used(&block_group->item);
7399
7400         /* no bytes used, we're good */
7401         if (!min_free)
7402                 goto out;
7403
7404         space_info = block_group->space_info;
7405         spin_lock(&space_info->lock);
7406
7407         full = space_info->full;
7408
7409         /*
7410          * if this is the last block group we have in this space, we can't
7411          * relocate it unless we're able to allocate a new chunk below.
7412          *
7413          * Otherwise, we need to make sure we have room in the space to handle
7414          * all of the extents from this block group.  If we can, we're good
7415          */
7416         if ((space_info->total_bytes != block_group->key.offset) &&
7417             (space_info->bytes_used + space_info->bytes_reserved +
7418              space_info->bytes_pinned + space_info->bytes_readonly +
7419              min_free < space_info->total_bytes)) {
7420                 spin_unlock(&space_info->lock);
7421                 goto out;
7422         }
7423         spin_unlock(&space_info->lock);
7424
7425         /*
7426          * ok we don't have enough space, but maybe we have free space on our
7427          * devices to allocate new chunks for relocation, so loop through our
7428          * alloc devices and guess if we have enough space.  if this block
7429          * group is going to be restriped, run checks against the target
7430          * profile instead of the current one.
7431          */
7432         ret = -1;
7433
7434         /*
7435          * index:
7436          *      0: raid10
7437          *      1: raid1
7438          *      2: dup
7439          *      3: raid0
7440          *      4: single
7441          */
7442         target = get_restripe_target(root->fs_info, block_group->flags);
7443         if (target) {
7444                 index = __get_raid_index(extended_to_chunk(target));
7445         } else {
7446                 /*
7447                  * this is just a balance, so if we were marked as full
7448                  * we know there is no space for a new chunk
7449                  */
7450                 if (full)
7451                         goto out;
7452
7453                 index = get_block_group_index(block_group);
7454         }
7455
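             /*
              * scale the per-device space needed by profile: raid10 needs
              * at least four devices with half the bytes on each mirror,
              * raid1 needs two devices, dup needs twice the bytes on one
              * device, and raid0 spreads the bytes over every rw device
              */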
7456         if (index == 0) {
7457                 dev_min = 4;
7458                 /* Divide by 2 */
7459                 min_free >>= 1;
7460         } else if (index == 1) {
7461                 dev_min = 2;
7462         } else if (index == 2) {
7463                 /* Multiply by 2 */
7464                 min_free <<= 1;
7465         } else if (index == 3) {
7466                 dev_min = fs_devices->rw_devices;
7467                 do_div(min_free, dev_min);
7468         }
7469
7470         mutex_lock(&root->fs_info->chunk_mutex);
7471         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7472                 u64 dev_offset;
7473
7474                 /*
7475                  * check to make sure we can actually find a chunk with enough
7476                  * space to fit our block group in.
7477                  */
7478                 if (device->total_bytes > device->bytes_used + min_free &&
7479                     !device->is_tgtdev_for_dev_replace) {
7480                         ret = find_free_dev_extent(device, min_free,
7481                                                    &dev_offset, NULL);
7482                         if (!ret)
7483                                 dev_nr++;
7484
7485                         if (dev_nr >= dev_min)
7486                                 break;
7487
7488                         ret = -1;
7489                 }
7490         }
7491         mutex_unlock(&root->fs_info->chunk_mutex);
7492 out:
7493         btrfs_put_block_group(block_group);
7494         return ret;
7495 }
7496
7497 static int find_first_block_group(struct btrfs_root *root,
7498                 struct btrfs_path *path, struct btrfs_key *key)
7499 {
7500         int ret = 0;
7501         struct btrfs_key found_key;
7502         struct extent_buffer *leaf;
7503         int slot;
7504
7505         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7506         if (ret < 0)
7507                 goto out;
7508
7509         while (1) {
7510                 slot = path->slots[0];
7511                 leaf = path->nodes[0];
7512                 if (slot >= btrfs_header_nritems(leaf)) {
7513                         ret = btrfs_next_leaf(root, path);
7514                         if (ret == 0)
7515                                 continue;
7516                         if (ret < 0)
7517                                 goto out;
7518                         break;
7519                 }
7520                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7521
7522                 if (found_key.objectid >= key->objectid &&
7523                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7524                         ret = 0;
7525                         goto out;
7526                 }
7527                 path->slots[0]++;
7528         }
7529 out:
7530         return ret;
7531 }
7532
7533 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7534 {
7535         struct btrfs_block_group_cache *block_group;
7536         u64 last = 0;
7537
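             /*
              * drop the inode reference (iref) each block group may hold
              * on its free space cache inode
              */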
7538         while (1) {
7539                 struct inode *inode;
7540
7541                 block_group = btrfs_lookup_first_block_group(info, last);
7542                 while (block_group) {
7543                         spin_lock(&block_group->lock);
7544                         if (block_group->iref)
7545                                 break;
7546                         spin_unlock(&block_group->lock);
7547                         block_group = next_block_group(info->tree_root,
7548                                                        block_group);
7549                 }
7550                 if (!block_group) {
7551                         if (last == 0)
7552                                 break;
7553                         last = 0;
7554                         continue;
7555                 }
7556
7557                 inode = block_group->inode;
7558                 block_group->iref = 0;
7559                 block_group->inode = NULL;
7560                 spin_unlock(&block_group->lock);
7561                 iput(inode);
7562                 last = block_group->key.objectid + block_group->key.offset;
7563                 btrfs_put_block_group(block_group);
7564         }
7565 }
7566
7567 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7568 {
7569         struct btrfs_block_group_cache *block_group;
7570         struct btrfs_space_info *space_info;
7571         struct btrfs_caching_control *caching_ctl;
7572         struct rb_node *n;
7573
7574         down_write(&info->extent_commit_sem);
7575         while (!list_empty(&info->caching_block_groups)) {
7576                 caching_ctl = list_entry(info->caching_block_groups.next,
7577                                          struct btrfs_caching_control, list);
7578                 list_del(&caching_ctl->list);
7579                 put_caching_control(caching_ctl);
7580         }
7581         up_write(&info->extent_commit_sem);
7582
7583         spin_lock(&info->block_group_cache_lock);
7584         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7585                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7586                                        cache_node);
7587                 rb_erase(&block_group->cache_node,
7588                          &info->block_group_cache_tree);
7589                 spin_unlock(&info->block_group_cache_lock);
7590
7591                 down_write(&block_group->space_info->groups_sem);
7592                 list_del(&block_group->list);
7593                 up_write(&block_group->space_info->groups_sem);
7594
7595                 if (block_group->cached == BTRFS_CACHE_STARTED)
7596                         wait_block_group_cache_done(block_group);
7597
7598                 /*
7599                  * We haven't cached this block group, which means we could
7600                  * possibly have excluded extents on this block group.
7601                  */
7602                 if (block_group->cached == BTRFS_CACHE_NO)
7603                         free_excluded_extents(info->extent_root, block_group);
7604
7605                 btrfs_remove_free_space_cache(block_group);
7606                 btrfs_put_block_group(block_group);
7607
7608                 spin_lock(&info->block_group_cache_lock);
7609         }
7610         spin_unlock(&info->block_group_cache_lock);
7611
7612         /* now that all the block groups are freed, go through and
7613          * free all the space_info structs.  This is only called during
7614          * the final stages of unmount, and so we know nobody is
7615          * using them.  We call synchronize_rcu() once before we start,
7616          * just to be on the safe side.
7617          */
7618         synchronize_rcu();
7619
7620         release_global_block_rsv(info);
7621
7622         while (!list_empty(&info->space_info)) {
7623                 space_info = list_entry(info->space_info.next,
7624                                         struct btrfs_space_info,
7625                                         list);
7626                 if (space_info->bytes_pinned > 0 ||
7627                     space_info->bytes_reserved > 0 ||
7628                     space_info->bytes_may_use > 0) {
7629                         WARN_ON(1);
7630                         dump_space_info(space_info, 0, 0);
7631                 }
7632                 list_del(&space_info->list);
7633                 kfree(space_info);
7634         }
7635         return 0;
7636 }
7637
7638 static void __link_block_group(struct btrfs_space_info *space_info,
7639                                struct btrfs_block_group_cache *cache)
7640 {
7641         int index = get_block_group_index(cache);
7642
7643         down_write(&space_info->groups_sem);
7644         list_add_tail(&cache->list, &space_info->block_groups[index]);
7645         up_write(&space_info->groups_sem);
7646 }
7647
7648 int btrfs_read_block_groups(struct btrfs_root *root)
7649 {
7650         struct btrfs_path *path;
7651         int ret;
7652         struct btrfs_block_group_cache *cache;
7653         struct btrfs_fs_info *info = root->fs_info;
7654         struct btrfs_space_info *space_info;
7655         struct btrfs_key key;
7656         struct btrfs_key found_key;
7657         struct extent_buffer *leaf;
7658         int need_clear = 0;
7659         u64 cache_gen;
7660
7661         root = info->extent_root;
7662         key.objectid = 0;
7663         key.offset = 0;
7664         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7665         path = btrfs_alloc_path();
7666         if (!path)
7667                 return -ENOMEM;
7668         path->reada = 1;
7669
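             /*
              * the on-disk space cache can only be trusted if its
              * generation matches the superblock; otherwise, or when
              * clear_cache was requested, mark every group's cache to be
              * rebuilt
              */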
7670         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7671         if (btrfs_test_opt(root, SPACE_CACHE) &&
7672             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7673                 need_clear = 1;
7674         if (btrfs_test_opt(root, CLEAR_CACHE))
7675                 need_clear = 1;
7676
7677         while (1) {
7678                 ret = find_first_block_group(root, path, &key);
7679                 if (ret > 0)
7680                         break;
7681                 if (ret != 0)
7682                         goto error;
7683                 leaf = path->nodes[0];
7684                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7685                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7686                 if (!cache) {
7687                         ret = -ENOMEM;
7688                         goto error;
7689                 }
7690                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7691                                                 GFP_NOFS);
7692                 if (!cache->free_space_ctl) {
7693                         kfree(cache);
7694                         ret = -ENOMEM;
7695                         goto error;
7696                 }
7697
7698                 atomic_set(&cache->count, 1);
7699                 spin_lock_init(&cache->lock);
7700                 cache->fs_info = info;
7701                 INIT_LIST_HEAD(&cache->list);
7702                 INIT_LIST_HEAD(&cache->cluster_list);
7703
7704                 if (need_clear) {
7705                         /*
7706                          * When we mount with old space cache, we need to
7707                          * set BTRFS_DC_CLEAR and set dirty flag.
7708                          *
7709                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7710                          *    truncate the old free space cache inode and
7711                          *    setup a new one.
7712                          * b) Setting 'dirty flag' makes sure that we flush
7713                          *    the new space cache info onto disk.
7714                          */
7715                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7716                         if (btrfs_test_opt(root, SPACE_CACHE))
7717                                 cache->dirty = 1;
7718                 }
7719
7720                 read_extent_buffer(leaf, &cache->item,
7721                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7722                                    sizeof(cache->item));
7723                 memcpy(&cache->key, &found_key, sizeof(found_key));
7724
7725                 key.objectid = found_key.objectid + found_key.offset;
7726                 btrfs_release_path(path);
7727                 cache->flags = btrfs_block_group_flags(&cache->item);
7728                 cache->sectorsize = root->sectorsize;
7729
7730                 btrfs_init_free_space_ctl(cache);
7731
7732                 /*
7733                  * We need to exclude the super stripes now so that the space
7734                  * info has super bytes accounted for, otherwise we'll think
7735                  * we have more space than we actually do.
7736                  */
7737                 exclude_super_stripes(root, cache);
7738
7739                 /*
7740                  * check for two cases, either we are full, and therefore
7741                  * don't need to bother with the caching work since we won't
7742                  * find any space, or we are empty, and we can just add all
7743                  * the space in and be done with it.  This saves us _alot_ of
7744                  * time, particularly in the full case.
7745                  */
7746                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7747                         cache->last_byte_to_unpin = (u64)-1;
7748                         cache->cached = BTRFS_CACHE_FINISHED;
7749                         free_excluded_extents(root, cache);
7750                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7751                         cache->last_byte_to_unpin = (u64)-1;
7752                         cache->cached = BTRFS_CACHE_FINISHED;
7753                         add_new_free_space(cache, root->fs_info,
7754                                            found_key.objectid,
7755                                            found_key.objectid +
7756                                            found_key.offset);
7757                         free_excluded_extents(root, cache);
7758                 }
7759
7760                 ret = update_space_info(info, cache->flags, found_key.offset,
7761                                         btrfs_block_group_used(&cache->item),
7762                                         &space_info);
7763                 BUG_ON(ret); /* -ENOMEM */
7764                 cache->space_info = space_info;
7765                 spin_lock(&cache->space_info->lock);
7766                 cache->space_info->bytes_readonly += cache->bytes_super;
7767                 spin_unlock(&cache->space_info->lock);
7768
7769                 __link_block_group(space_info, cache);
7770
7771                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7772                 BUG_ON(ret); /* Logic error */
7773
7774                 set_avail_alloc_bits(root->fs_info, cache->flags);
7775                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7776                         set_block_group_ro(cache, 1);
7777         }
7778
7779         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7780                 if (!(get_alloc_profile(root, space_info->flags) &
7781                       (BTRFS_BLOCK_GROUP_RAID10 |
7782                        BTRFS_BLOCK_GROUP_RAID1 |
7783                        BTRFS_BLOCK_GROUP_DUP)))
7784                         continue;
7785                 /*
7786                  * avoid allocating from un-mirrored block groups if
7787                  * there are mirrored block groups.
7788                  */
7789                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7790                         set_block_group_ro(cache, 1);
7791                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7792                         set_block_group_ro(cache, 1);
7793         }
7794
7795         init_global_block_rsv(info);
7796         ret = 0;
7797 error:
7798         btrfs_free_path(path);
7799         return ret;
7800 }
7801
7802 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
7803                                        struct btrfs_root *root)
7804 {
7805         struct btrfs_block_group_cache *block_group, *tmp;
7806         struct btrfs_root *extent_root = root->fs_info->extent_root;
7807         struct btrfs_block_group_item item;
7808         struct btrfs_key key;
7809         int ret = 0;
7810
7811         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
7812                                  new_bg_list) {
7813                 list_del_init(&block_group->new_bg_list);
7814
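                     /*
                      * if a previous insertion failed we have already
                      * aborted the transaction, so keep draining the list
                      * but skip the on-disk insertions
                      */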
7815                 if (ret)
7816                         continue;
7817
7818                 spin_lock(&block_group->lock);
7819                 memcpy(&item, &block_group->item, sizeof(item));
7820                 memcpy(&key, &block_group->key, sizeof(key));
7821                 spin_unlock(&block_group->lock);
7822
7823                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
7824                                         sizeof(item));
7825                 if (ret)
7826                         btrfs_abort_transaction(trans, extent_root, ret);
7827         }
7828 }
7829
7830 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7831                            struct btrfs_root *root, u64 bytes_used,
7832                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7833                            u64 size)
7834 {
7835         int ret;
7836         struct btrfs_root *extent_root;
7837         struct btrfs_block_group_cache *cache;
7838
7839         extent_root = root->fs_info->extent_root;
7840
7841         root->fs_info->last_trans_log_full_commit = trans->transid;
7842
7843         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7844         if (!cache)
7845                 return -ENOMEM;
7846         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7847                                         GFP_NOFS);
7848         if (!cache->free_space_ctl) {
7849                 kfree(cache);
7850                 return -ENOMEM;
7851         }
7852
7853         cache->key.objectid = chunk_offset;
7854         cache->key.offset = size;
7855         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7856         cache->sectorsize = root->sectorsize;
7857         cache->fs_info = root->fs_info;
7858
7859         atomic_set(&cache->count, 1);
7860         spin_lock_init(&cache->lock);
7861         INIT_LIST_HEAD(&cache->list);
7862         INIT_LIST_HEAD(&cache->cluster_list);
7863         INIT_LIST_HEAD(&cache->new_bg_list);
7864
7865         btrfs_init_free_space_ctl(cache);
7866
7867         btrfs_set_block_group_used(&cache->item, bytes_used);
7868         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7869         cache->flags = type;
7870         btrfs_set_block_group_flags(&cache->item, type);
7871
7872         cache->last_byte_to_unpin = (u64)-1;
7873         cache->cached = BTRFS_CACHE_FINISHED;
7874         exclude_super_stripes(root, cache);
7875
7876         add_new_free_space(cache, root->fs_info, chunk_offset,
7877                            chunk_offset + size);
7878
7879         free_excluded_extents(root, cache);
7880
7881         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7882                                 &cache->space_info);
7883         BUG_ON(ret); /* -ENOMEM */
7884         update_global_block_rsv(root->fs_info);
7885
7886         spin_lock(&cache->space_info->lock);
7887         cache->space_info->bytes_readonly += cache->bytes_super;
7888         spin_unlock(&cache->space_info->lock);
7889
7890         __link_block_group(cache->space_info, cache);
7891
7892         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7893         BUG_ON(ret); /* Logic error */
7894
7895         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
7896
7897         set_avail_alloc_bits(extent_root->fs_info, type);
7898
7899         return 0;
7900 }
7901
7902 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7903 {
7904         u64 extra_flags = chunk_to_extended(flags) &
7905                                 BTRFS_EXTENDED_PROFILE_MASK;
7906
7907         if (flags & BTRFS_BLOCK_GROUP_DATA)
7908                 fs_info->avail_data_alloc_bits &= ~extra_flags;
7909         if (flags & BTRFS_BLOCK_GROUP_METADATA)
7910                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7911         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7912                 fs_info->avail_system_alloc_bits &= ~extra_flags;
7913 }
7914
7915 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7916                              struct btrfs_root *root, u64 group_start)
7917 {
7918         struct btrfs_path *path;
7919         struct btrfs_block_group_cache *block_group;
7920         struct btrfs_free_cluster *cluster;
7921         struct btrfs_root *tree_root = root->fs_info->tree_root;
7922         struct btrfs_key key;
7923         struct inode *inode;
7924         int ret;
7925         int index;
7926         int factor;
7927
7928         root = root->fs_info->extent_root;
7929
7930         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7931         BUG_ON(!block_group);
7932         BUG_ON(!block_group->ro);
7933
7934         /*
7935          * Free the reserved super bytes from this block group before
7936          * removing it.
7937          */
7938         free_excluded_extents(root, block_group);
7939
7940         memcpy(&key, &block_group->key, sizeof(key));
7941         index = get_block_group_index(block_group);
7942         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7943                                   BTRFS_BLOCK_GROUP_RAID1 |
7944                                   BTRFS_BLOCK_GROUP_RAID10))
7945                 factor = 2;
7946         else
7947                 factor = 1;
7948
7949         /* make sure this block group isn't part of an allocation cluster */
7950         cluster = &root->fs_info->data_alloc_cluster;
7951         spin_lock(&cluster->refill_lock);
7952         btrfs_return_cluster_to_free_space(block_group, cluster);
7953         spin_unlock(&cluster->refill_lock);
7954
7955         /*
7956          * make sure this block group isn't part of a metadata
7957          * allocation cluster
7958          */
7959         cluster = &root->fs_info->meta_alloc_cluster;
7960         spin_lock(&cluster->refill_lock);
7961         btrfs_return_cluster_to_free_space(block_group, cluster);
7962         spin_unlock(&cluster->refill_lock);
7963
7964         path = btrfs_alloc_path();
7965         if (!path) {
7966                 ret = -ENOMEM;
7967                 goto out;
7968         }
7969
7970         inode = lookup_free_space_inode(tree_root, block_group, path);
7971         if (!IS_ERR(inode)) {
7972                 ret = btrfs_orphan_add(trans, inode);
7973                 if (ret) {
7974                         btrfs_add_delayed_iput(inode);
7975                         goto out;
7976                 }
7977                 clear_nlink(inode);
7978                 /* One for the block group's ref */
7979                 spin_lock(&block_group->lock);
7980                 if (block_group->iref) {
7981                         block_group->iref = 0;
7982                         block_group->inode = NULL;
7983                         spin_unlock(&block_group->lock);
7984                         iput(inode);
7985                 } else {
7986                         spin_unlock(&block_group->lock);
7987                 }
7988                 /* One for our lookup ref */
7989                 btrfs_add_delayed_iput(inode);
7990         }
7991
7992         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7993         key.offset = block_group->key.objectid;
7994         key.type = 0;
7995
7996         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7997         if (ret < 0)
7998                 goto out;
7999         if (ret > 0)
8000                 btrfs_release_path(path);
8001         if (ret == 0) {
8002                 ret = btrfs_del_item(trans, tree_root, path);
8003                 if (ret)
8004                         goto out;
8005                 btrfs_release_path(path);
8006         }
8007
8008         spin_lock(&root->fs_info->block_group_cache_lock);
8009         rb_erase(&block_group->cache_node,
8010                  &root->fs_info->block_group_cache_tree);
8011         spin_unlock(&root->fs_info->block_group_cache_lock);
8012
8013         down_write(&block_group->space_info->groups_sem);
8014         /*
8015          * we must use list_del_init so people can check to see if they
8016          * are still on the list after taking the semaphore
8017          */
8018         list_del_init(&block_group->list);
8019         if (list_empty(&block_group->space_info->block_groups[index]))
8020                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8021         up_write(&block_group->space_info->groups_sem);
8022
8023         if (block_group->cached == BTRFS_CACHE_STARTED)
8024                 wait_block_group_cache_done(block_group);
8025
8026         btrfs_remove_free_space_cache(block_group);
8027
8028         spin_lock(&block_group->space_info->lock);
8029         block_group->space_info->total_bytes -= block_group->key.offset;
8030         block_group->space_info->bytes_readonly -= block_group->key.offset;
8031         block_group->space_info->disk_total -= block_group->key.offset * factor;
8032         spin_unlock(&block_group->space_info->lock);
8033
8034         memcpy(&key, &block_group->key, sizeof(key));
8035
8036         btrfs_clear_space_info_full(root->fs_info);
8037
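             /* once for our lookup ref, once for the cache tree's ref */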
8038         btrfs_put_block_group(block_group);
8039         btrfs_put_block_group(block_group);
8040
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *space_info;
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return 1;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        if (ret)
                goto out;

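        /*
         * With mixed block groups, data and metadata share the same block
         * groups, so a single combined space_info covers both; otherwise
         * create a separate space_info for metadata and for data.
         */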
        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        }
out:
        return ret;
}

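/*
 * Error-handling variants used while tearing down a filesystem after an
 * aborted transaction; they simply forward to the normal helpers.
 */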
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
        return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 *actual_bytes)
{
        return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}

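/*
 * Back end of the FITRIM ioctl: walk all block groups that overlap the
 * requested range and discard their unused space.
 */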
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * Trimming the whole FS: the first block group may start at a
         * non-zero offset, so use the first-block-group lookup instead
         * of an exact one.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

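                /* Clamp the trim range to this block group's boundaries. */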
                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                                cache->key.objectid + cache->key.offset);

                if (end - start >= range->minlen) {
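                        /*
                         * Free space must be fully cached before the
                         * unused space in this group can be trimmed.
                         */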
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, NULL, root, 0);
                                if (!ret)
                                        wait_block_group_cache_done(cache);
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info->tree_root, cache);
        }

        range->len = trimmed;
        return ret;
}