
Btrfs: make free space caching faster with many non-inline extent references
android-x86/kernel.git: fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

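/*
 * Return non-zero once this block group's free space has been fully
 * loaded.  The barrier makes sure we read an up-to-date ->cached value
 * rather than one hoisted before the caching pass finished.
 */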
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

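/*
 * Block group cache entries are reference counted; the final
 * btrfs_put_block_group() frees the structure, so every get must be
 * paired with a put.
 */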
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * This adds the block group to the fs_info rb tree for the block group
 * cache.
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0,
 * else it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

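/*
 * Tag [start, start + num_bytes) in both freed_extents trees so the
 * range is treated as excluded (e.g. superblock mirrors) and never
 * handed out as free space while caching.
 */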
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

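/*
 * Account every superblock mirror that lands inside this block group
 * as used space (bytes_super) and exclude those ranges from the free
 * space cache so allocations can never clobber a superblock copy.
 */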
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

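/*
 * Take a reference on the caching control if the slow caching pass has
 * started; return NULL otherwise (including the fast space-cache load,
 * which runs without a caching_ctl attached).
 */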
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents
 * that can't be used yet, because their free space will be released as
 * soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

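/*
 * Slow path: walk the extent tree (via the commit root) for this block
 * group and record the gaps between allocated extents as free space.
 * It periodically drops its locks and reschedules so allocators waiting
 * on caching_ctl->wait can make progress with partial results.
 */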
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched()) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

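/*
 * Start caching a block group's free space.  Try the fast path first
 * (loading the on-disk free space cache); if that fails and
 * load_cache_only is not set, fall back to queueing caching_thread()
 * to rebuild the free space info from the extent tree.
 */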
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching block
         * groups we need to first check if we're doing a fast load here, so
         * we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

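/*
 * Find the space_info (data, metadata or system) matching the block
 * group type bits in flags; the list is RCU protected so this can be
 * called without any locks held.
 */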
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree
 * block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what
 * the reference count and extent flags will be once all of the delayed
 * refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (metadata) {
                key.objectid = bytenr;
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = offset;
        } else {
                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = offset;
        }

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                metadata = 0;
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->leafsize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->leafsize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back refs are generic,
 * and can be used in all cases where implicit back refs are used.  The
 * major shortcoming of full back refs is their overhead: every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do
 * the back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block and increase the lower level extents' reference
 * counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

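/*
 * Compute the key offset used for EXTENT_DATA_REF items: a crc32c hash
 * over (root objectid, inode objectid, file offset), mixed so the high
 * and low halves come from separate running checksums.
 */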
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

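/*
 * Find an existing data ref for this extent: a SHARED_DATA_REF keyed on
 * the parent block when one is given, otherwise an EXTENT_DATA_REF
 * keyed on the (root, owner, offset) hash.  Hash collisions mean we may
 * have to scan forward past neighbouring items, re-searching if the
 * leaf changed underneath us (recow).
 */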
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

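/*
 * Insert or bump a data ref for this extent.  Shared refs keep a plain
 * count per parent block; non-shared refs live at the (root, owner,
 * offset) hash, so on -EEXIST we either merge into a matching item or
 * probe the next key offset until a free slot or match is found.
 */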
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

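/*
 * Drop refs_to_drop references from the data ref item at the current
 * path position, deleting the item entirely once its count hits zero.
 */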
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

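/*
 * Tree blocks use keyed refs with no payload: SHARED_BLOCK_REF keyed on
 * the parent block for full back refs, TREE_BLOCK_REF keyed on the
 * owner root for implicit ones.  The lookup returns -ENOENT when no
 * such item exists.
 */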
1362 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1363                                           struct btrfs_root *root,
1364                                           struct btrfs_path *path,
1365                                           u64 bytenr, u64 parent,
1366                                           u64 root_objectid)
1367 {
1368         struct btrfs_key key;
1369         int ret;
1370
1371         key.objectid = bytenr;
1372         if (parent) {
1373                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1374                 key.offset = parent;
1375         } else {
1376                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1377                 key.offset = root_objectid;
1378         }
1379
1380         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1381         if (ret > 0)
1382                 ret = -ENOENT;
1383 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1384         if (ret == -ENOENT && parent) {
1385                 btrfs_release_path(path);
1386                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1387                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388                 if (ret > 0)
1389                         ret = -ENOENT;
1390         }
1391 #endif
1392         return ret;
1393 }
1394
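     /*
      * Insert the (empty) keyed backref item for a tree block; the key is
      * built the same way as in lookup_tree_block_ref() above.
      */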
1395 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1396                                           struct btrfs_root *root,
1397                                           struct btrfs_path *path,
1398                                           u64 bytenr, u64 parent,
1399                                           u64 root_objectid)
1400 {
1401         struct btrfs_key key;
1402         int ret;
1403
1404         key.objectid = bytenr;
1405         if (parent) {
1406                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1407                 key.offset = parent;
1408         } else {
1409                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1410                 key.offset = root_objectid;
1411         }
1412
1413         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1414         btrfs_release_path(path);
1415         return ret;
1416 }
1417
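     /*
      * Pick the backref key type for a ref.  Owners below
      * BTRFS_FIRST_FREE_OBJECTID are tree blocks, everything else is file
      * data; a non-zero parent selects the shared (by-bytenr) variant.
      */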
1418 static inline int extent_ref_type(u64 parent, u64 owner)
1419 {
1420         int type;
1421         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1422                 if (parent > 0)
1423                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1424                 else
1425                         type = BTRFS_TREE_BLOCK_REF_KEY;
1426         } else {
1427                 if (parent > 0)
1428                         type = BTRFS_SHARED_DATA_REF_KEY;
1429                 else
1430                         type = BTRFS_EXTENT_DATA_REF_KEY;
1431         }
1432         return type;
1433 }
1434
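     /*
      * Starting at @level, walk up the path and report the key that follows
      * the current slot.  Returns 0 with *key filled in, or 1 if the path
      * already points at the last key in the tree.
      */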
1435 static int find_next_key(struct btrfs_path *path, int level,
1436                          struct btrfs_key *key)
1437
1438 {
1439         for (; level < BTRFS_MAX_LEVEL; level++) {
1440                 if (!path->nodes[level])
1441                         break;
1442                 if (path->slots[level] + 1 >=
1443                     btrfs_header_nritems(path->nodes[level]))
1444                         continue;
1445                 if (level == 0)
1446                         btrfs_item_key_to_cpu(path->nodes[level], key,
1447                                               path->slots[level] + 1);
1448                 else
1449                         btrfs_node_key_to_cpu(path->nodes[level], key,
1450                                               path->slots[level] + 1);
1451                 return 0;
1452         }
1453         return 1;
1454 }
1455
1456 /*
1457  * look for inline back ref. if back ref is found, *ref_ret is set
1458  * to the address of inline back ref, and 0 is returned.
1459  *
1460  * if back ref isn't found, *ref_ret is set to the address where it
1461  * should be inserted, and -ENOENT is returned.
1462  *
1463  * if insert is true and there are too many inline back refs, the path
1464  * points to the extent item, and -EAGAIN is returned.
1465  *
1466  * NOTE: inline back refs are ordered in the same way that back ref
1467  *       items in the tree are ordered.
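      *
      * For example, a caller adding a ref passes insert == 1: on -ENOENT it
      * can write the new inline ref at *ref_ret, and on -EAGAIN it must
      * fall back to a keyed backref item (see insert_inline_extent_backref
      * and insert_extent_backref below).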
1468  */
1469 static noinline_for_stack
1470 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1471                                  struct btrfs_root *root,
1472                                  struct btrfs_path *path,
1473                                  struct btrfs_extent_inline_ref **ref_ret,
1474                                  u64 bytenr, u64 num_bytes,
1475                                  u64 parent, u64 root_objectid,
1476                                  u64 owner, u64 offset, int insert)
1477 {
1478         struct btrfs_key key;
1479         struct extent_buffer *leaf;
1480         struct btrfs_extent_item *ei;
1481         struct btrfs_extent_inline_ref *iref;
1482         u64 flags;
1483         u64 item_size;
1484         unsigned long ptr;
1485         unsigned long end;
1486         int extra_size;
1487         int type;
1488         int want;
1489         int ret;
1490         int err = 0;
1491         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1492                                                  SKINNY_METADATA);
1493
1494         key.objectid = bytenr;
1495         key.type = BTRFS_EXTENT_ITEM_KEY;
1496         key.offset = num_bytes;
1497
1498         want = extent_ref_type(parent, owner);
1499         if (insert) {
1500                 extra_size = btrfs_extent_inline_ref_size(want);
1501                 path->keep_locks = 1;
1502         } else
1503                 extra_size = -1;
1504
1505         /*
1506          * For metadata, the owner passed in is the level of the block we
1507          * want, which is exactly what the skinny metadata key stores.
1508          */
1509         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1510                 key.type = BTRFS_METADATA_ITEM_KEY;
1511                 key.offset = owner;
1512         }
1513
1514 again:
1515         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1516         if (ret < 0) {
1517                 err = ret;
1518                 goto out;
1519         }
1520
1521         /*
1522          * We may be a newly converted file system which still has the old fat
1523          * extent entries for metadata, so try and see if we have one of those.
1524          */
1525         if (ret > 0 && skinny_metadata) {
1526                 skinny_metadata = false;
1527                 if (path->slots[0]) {
1528                         path->slots[0]--;
1529                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1530                                               path->slots[0]);
1531                         if (key.objectid == bytenr &&
1532                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1533                             key.offset == num_bytes)
1534                                 ret = 0;
1535                 }
1536                 if (ret) {
1537                         key.type = BTRFS_EXTENT_ITEM_KEY;
1538                         key.offset = num_bytes;
1539                         btrfs_release_path(path);
1540                         goto again;
1541                 }
1542         }
1543
1544         if (ret && !insert) {
1545                 err = -ENOENT;
1546                 goto out;
1547         } else if (ret) {
1548                 err = -EIO;
1549                 WARN_ON(1);
1550                 goto out;
1551         }
1552
1553         leaf = path->nodes[0];
1554         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1555 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1556         if (item_size < sizeof(*ei)) {
1557                 if (!insert) {
1558                         err = -ENOENT;
1559                         goto out;
1560                 }
1561                 ret = convert_extent_item_v0(trans, root, path, owner,
1562                                              extra_size);
1563                 if (ret < 0) {
1564                         err = ret;
1565                         goto out;
1566                 }
1567                 leaf = path->nodes[0];
1568                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1569         }
1570 #endif
1571         BUG_ON(item_size < sizeof(*ei));
1572
1573         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1574         flags = btrfs_extent_flags(leaf, ei);
1575
1576         ptr = (unsigned long)(ei + 1);
1577         end = (unsigned long)ei + item_size;
1578
1579         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1580                 ptr += sizeof(struct btrfs_tree_block_info);
1581                 BUG_ON(ptr > end);
1582         }
1583
1584         err = -ENOENT;
1585         while (1) {
1586                 if (ptr >= end) {
1587                         WARN_ON(ptr > end);
1588                         break;
1589                 }
1590                 iref = (struct btrfs_extent_inline_ref *)ptr;
1591                 type = btrfs_extent_inline_ref_type(leaf, iref);
1592                 if (want < type)
1593                         break;
1594                 if (want > type) {
1595                         ptr += btrfs_extent_inline_ref_size(type);
1596                         continue;
1597                 }
1598
1599                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1600                         struct btrfs_extent_data_ref *dref;
1601                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1602                         if (match_extent_data_ref(leaf, dref, root_objectid,
1603                                                   owner, offset)) {
1604                                 err = 0;
1605                                 break;
1606                         }
1607                         if (hash_extent_data_ref_item(leaf, dref) <
1608                             hash_extent_data_ref(root_objectid, owner, offset))
1609                                 break;
1610                 } else {
1611                         u64 ref_offset;
1612                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1613                         if (parent > 0) {
1614                                 if (parent == ref_offset) {
1615                                         err = 0;
1616                                         break;
1617                                 }
1618                                 if (ref_offset < parent)
1619                                         break;
1620                         } else {
1621                                 if (root_objectid == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < root_objectid)
1626                                         break;
1627                         }
1628                 }
1629                 ptr += btrfs_extent_inline_ref_size(type);
1630         }
1631         if (err == -ENOENT && insert) {
1632                 if (item_size + extra_size >=
1633                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1634                         err = -EAGAIN;
1635                         goto out;
1636                 }
1637                 /*
1638                  * To add a new inline back ref, we have to make sure
1639                  * there is no corresponding back ref item.
1640                  * For simplicity, we just do not add a new inline back
1641                  * ref if there is any kind of item for this block.
1642                  */
1643                 if (find_next_key(path, 0, &key) == 0 &&
1644                     key.objectid == bytenr &&
1645                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1646                         err = -EAGAIN;
1647                         goto out;
1648                 }
1649         }
1650         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1651 out:
1652         if (insert) {
1653                 path->keep_locks = 0;
1654                 btrfs_unlock_up_safe(path, 1);
1655         }
1656         return err;
1657 }
1658
1659 /*
1660  * helper to add a new inline back ref
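      *
      * The new ref is memmove()d into the extent item at the position found
      * by lookup_inline_extent_backref(), so a data ref ends up roughly as:
      *
      *   [ btrfs_extent_item | ... | type byte | btrfs_extent_data_ref | ... ]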
1661  */
1662 static noinline_for_stack
1663 void setup_inline_extent_backref(struct btrfs_root *root,
1664                                  struct btrfs_path *path,
1665                                  struct btrfs_extent_inline_ref *iref,
1666                                  u64 parent, u64 root_objectid,
1667                                  u64 owner, u64 offset, int refs_to_add,
1668                                  struct btrfs_delayed_extent_op *extent_op)
1669 {
1670         struct extent_buffer *leaf;
1671         struct btrfs_extent_item *ei;
1672         unsigned long ptr;
1673         unsigned long end;
1674         unsigned long item_offset;
1675         u64 refs;
1676         int size;
1677         int type;
1678
1679         leaf = path->nodes[0];
1680         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681         item_offset = (unsigned long)iref - (unsigned long)ei;
1682
1683         type = extent_ref_type(parent, owner);
1684         size = btrfs_extent_inline_ref_size(type);
1685
1686         btrfs_extend_item(root, path, size);
1687
1688         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1689         refs = btrfs_extent_refs(leaf, ei);
1690         refs += refs_to_add;
1691         btrfs_set_extent_refs(leaf, ei, refs);
1692         if (extent_op)
1693                 __run_delayed_extent_op(extent_op, leaf, ei);
1694
1695         ptr = (unsigned long)ei + item_offset;
1696         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1697         if (ptr < end - size)
1698                 memmove_extent_buffer(leaf, ptr + size, ptr,
1699                                       end - size - ptr);
1700
1701         iref = (struct btrfs_extent_inline_ref *)ptr;
1702         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1703         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1704                 struct btrfs_extent_data_ref *dref;
1705                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1707                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1708                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1709                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1710         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711                 struct btrfs_shared_data_ref *sref;
1712                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1713                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1717         } else {
1718                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1719         }
1720         btrfs_mark_buffer_dirty(leaf);
1721 }
1722
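     /*
      * Look up a backref in either form: try the inline ref inside the
      * extent item first, then fall back to the keyed backref items.
      * *ref_ret is only set when an inline ref is found.
      */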
1723 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1724                                  struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref **ref_ret,
1727                                  u64 bytenr, u64 num_bytes, u64 parent,
1728                                  u64 root_objectid, u64 owner, u64 offset)
1729 {
1730         int ret;
1731
1732         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1733                                            bytenr, num_bytes, parent,
1734                                            root_objectid, owner, offset, 0);
1735         if (ret != -ENOENT)
1736                 return ret;
1737
1738         btrfs_release_path(path);
1739         *ref_ret = NULL;
1740
1741         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1742                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1743                                             root_objectid);
1744         } else {
1745                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1746                                              root_objectid, owner, offset);
1747         }
1748         return ret;
1749 }
1750
1751 /*
1752  * helper to update/remove an inline back ref
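      *
      * When the count hits zero the inline ref is memmove()d out and the
      * extent item is truncated by the size of the removed ref.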
1753  */
1754 static noinline_for_stack
1755 void update_inline_extent_backref(struct btrfs_root *root,
1756                                   struct btrfs_path *path,
1757                                   struct btrfs_extent_inline_ref *iref,
1758                                   int refs_to_mod,
1759                                   struct btrfs_delayed_extent_op *extent_op)
1760 {
1761         struct extent_buffer *leaf;
1762         struct btrfs_extent_item *ei;
1763         struct btrfs_extent_data_ref *dref = NULL;
1764         struct btrfs_shared_data_ref *sref = NULL;
1765         unsigned long ptr;
1766         unsigned long end;
1767         u32 item_size;
1768         int size;
1769         int type;
1770         u64 refs;
1771
1772         leaf = path->nodes[0];
1773         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1774         refs = btrfs_extent_refs(leaf, ei);
1775         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1776         refs += refs_to_mod;
1777         btrfs_set_extent_refs(leaf, ei, refs);
1778         if (extent_op)
1779                 __run_delayed_extent_op(extent_op, leaf, ei);
1780
1781         type = btrfs_extent_inline_ref_type(leaf, iref);
1782
1783         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1784                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1785                 refs = btrfs_extent_data_ref_count(leaf, dref);
1786         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1787                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1788                 refs = btrfs_shared_data_ref_count(leaf, sref);
1789         } else {
1790                 refs = 1;
1791                 BUG_ON(refs_to_mod != -1);
1792         }
1793
1794         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1795         refs += refs_to_mod;
1796
1797         if (refs > 0) {
1798                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1799                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1800                 else
1801                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1802         } else {
1803                 size =  btrfs_extent_inline_ref_size(type);
1804                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1805                 ptr = (unsigned long)iref;
1806                 end = (unsigned long)ei + item_size;
1807                 if (ptr + size < end)
1808                         memmove_extent_buffer(leaf, ptr, ptr + size,
1809                                               end - ptr - size);
1810                 item_size -= size;
1811                 btrfs_truncate_item(root, path, item_size, 1);
1812         }
1813         btrfs_mark_buffer_dirty(leaf);
1814 }
1815
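     /*
      * Take the inline fast path for adding refs: bump an existing inline
      * ref, or create one if there is room.  -EAGAIN tells the caller to
      * fall back to a keyed backref item.
      */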
1816 static noinline_for_stack
1817 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1818                                  struct btrfs_root *root,
1819                                  struct btrfs_path *path,
1820                                  u64 bytenr, u64 num_bytes, u64 parent,
1821                                  u64 root_objectid, u64 owner,
1822                                  u64 offset, int refs_to_add,
1823                                  struct btrfs_delayed_extent_op *extent_op)
1824 {
1825         struct btrfs_extent_inline_ref *iref;
1826         int ret;
1827
1828         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1829                                            bytenr, num_bytes, parent,
1830                                            root_objectid, owner, offset, 1);
1831         if (ret == 0) {
1832                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1833                 update_inline_extent_backref(root, path, iref,
1834                                              refs_to_add, extent_op);
1835         } else if (ret == -ENOENT) {
1836                 setup_inline_extent_backref(root, path, iref, parent,
1837                                             root_objectid, owner, offset,
1838                                             refs_to_add, extent_op);
1839                 ret = 0;
1840         }
1841         return ret;
1842 }
1843
1844 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1845                                  struct btrfs_root *root,
1846                                  struct btrfs_path *path,
1847                                  u64 bytenr, u64 parent, u64 root_objectid,
1848                                  u64 owner, u64 offset, int refs_to_add)
1849 {
1850         int ret;
1851         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1852                 BUG_ON(refs_to_add != 1);
1853                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1854                                             parent, root_objectid);
1855         } else {
1856                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1857                                              parent, root_objectid,
1858                                              owner, offset, refs_to_add);
1859         }
1860         return ret;
1861 }
1862
1863 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1864                                  struct btrfs_root *root,
1865                                  struct btrfs_path *path,
1866                                  struct btrfs_extent_inline_ref *iref,
1867                                  int refs_to_drop, int is_data)
1868 {
1869         int ret = 0;
1870
1871         BUG_ON(!is_data && refs_to_drop != 1);
1872         if (iref) {
1873                 update_inline_extent_backref(root, path, iref,
1874                                              -refs_to_drop, NULL);
1875         } else if (is_data) {
1876                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1877         } else {
1878                 ret = btrfs_del_item(trans, root, path);
1879         }
1880         return ret;
1881 }
1882
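     /* blkdev_issue_discard() works in 512-byte sectors, hence the shift by 9 */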
1883 static int btrfs_issue_discard(struct block_device *bdev,
1884                                 u64 start, u64 len)
1885 {
1886         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1887 }
1888
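     /*
      * Issue discards for every stripe backing [bytenr, bytenr + num_bytes);
      * the number of bytes actually discarded comes back in *actual_bytes.
      */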
1889 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1890                                 u64 num_bytes, u64 *actual_bytes)
1891 {
1892         int ret;
1893         u64 discarded_bytes = 0;
1894         struct btrfs_bio *bbio = NULL;
1895
1896
1897         /* Tell the block device(s) that the sectors can be discarded */
1898         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1899                               bytenr, &num_bytes, &bbio, 0);
1900         /* Error condition is -ENOMEM */
1901         if (!ret) {
1902                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1903                 int i;
1904
1905
1906                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1907                         if (!stripe->dev->can_discard)
1908                                 continue;
1909
1910                         ret = btrfs_issue_discard(stripe->dev->bdev,
1911                                                   stripe->physical,
1912                                                   stripe->length);
1913                         if (!ret)
1914                                 discarded_bytes += stripe->length;
1915                         else if (ret != -EOPNOTSUPP)
1916                                 break; /* logic errors, -ENOMEM, or an unexpected -EIO */
1917
1918                         /*
1919                          * Just in case we get back EOPNOTSUPP for some reason,
1920                          * ignore the return value so we don't screw up
1921                          * people calling discard_extent.
1922                          */
1923                         ret = 0;
1924                 }
1925                 kfree(bbio);
1926         }
1927
1928         if (actual_bytes)
1929                 *actual_bytes = discarded_bytes;
1930
1931
1932         if (ret == -EOPNOTSUPP)
1933                 ret = 0;
1934         return ret;
1935 }
1936
1937 /* Can return -ENOMEM */
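     /* Only queues a delayed ref; the extent tree is updated later when delayed refs are run. */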
1938 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1939                          struct btrfs_root *root,
1940                          u64 bytenr, u64 num_bytes, u64 parent,
1941                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1942 {
1943         int ret;
1944         struct btrfs_fs_info *fs_info = root->fs_info;
1945
1946         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1947                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1948
1949         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1950                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1951                                         num_bytes,
1952                                         parent, root_objectid, (int)owner,
1953                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1954         } else {
1955                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1956                                         num_bytes,
1957                                         parent, root_objectid, owner, offset,
1958                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1959         }
1960         return ret;
1961 }
1962
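     /*
      * Add @refs_to_add refs to an existing extent.  Try an inline backref
      * first; on -EAGAIN (no room inline) bump the ref count on the extent
      * item and insert a keyed backref item instead.
      */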
1963 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1964                                   struct btrfs_root *root,
1965                                   u64 bytenr, u64 num_bytes,
1966                                   u64 parent, u64 root_objectid,
1967                                   u64 owner, u64 offset, int refs_to_add,
1968                                   struct btrfs_delayed_extent_op *extent_op)
1969 {
1970         struct btrfs_path *path;
1971         struct extent_buffer *leaf;
1972         struct btrfs_extent_item *item;
1973         u64 refs;
1974         int ret;
1975         int err = 0;
1976
1977         path = btrfs_alloc_path();
1978         if (!path)
1979                 return -ENOMEM;
1980
1981         path->reada = 1;
1982         path->leave_spinning = 1;
1983         /* this will set up the path even if it fails to insert the back ref */
1984         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1985                                            path, bytenr, num_bytes, parent,
1986                                            root_objectid, owner, offset,
1987                                            refs_to_add, extent_op);
1988         if (ret == 0)
1989                 goto out;
1990
1991         if (ret != -EAGAIN) {
1992                 err = ret;
1993                 goto out;
1994         }
1995
1996         leaf = path->nodes[0];
1997         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1998         refs = btrfs_extent_refs(leaf, item);
1999         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2000         if (extent_op)
2001                 __run_delayed_extent_op(extent_op, leaf, item);
2002
2003         btrfs_mark_buffer_dirty(leaf);
2004         btrfs_release_path(path);
2005
2006         path->reada = 1;
2007         path->leave_spinning = 1;
2008
2009         /* now insert the actual backref */
2010         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2011                                     path, bytenr, parent, root_objectid,
2012                                     owner, offset, refs_to_add);
2013         if (ret)
2014                 btrfs_abort_transaction(trans, root, ret);
2015 out:
2016         btrfs_free_path(path);
2017         return err;
2018 }
2019
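     /*
      * Apply one delayed data ref.  A first insertion with reserved space
      * allocates the file extent item; otherwise a ref is added to or
      * dropped from the existing extent.
      */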
2020 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2021                                 struct btrfs_root *root,
2022                                 struct btrfs_delayed_ref_node *node,
2023                                 struct btrfs_delayed_extent_op *extent_op,
2024                                 int insert_reserved)
2025 {
2026         int ret = 0;
2027         struct btrfs_delayed_data_ref *ref;
2028         struct btrfs_key ins;
2029         u64 parent = 0;
2030         u64 ref_root = 0;
2031         u64 flags = 0;
2032
2033         ins.objectid = node->bytenr;
2034         ins.offset = node->num_bytes;
2035         ins.type = BTRFS_EXTENT_ITEM_KEY;
2036
2037         ref = btrfs_delayed_node_to_data_ref(node);
2038         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2039                 parent = ref->parent;
2040         else
2041                 ref_root = ref->root;
2042
2043         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2044                 if (extent_op)
2045                         flags |= extent_op->flags_to_set;
2046                 ret = alloc_reserved_file_extent(trans, root,
2047                                                  parent, ref_root, flags,
2048                                                  ref->objectid, ref->offset,
2049                                                  &ins, node->ref_mod);
2050         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2051                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2052                                              node->num_bytes, parent,
2053                                              ref_root, ref->objectid,
2054                                              ref->offset, node->ref_mod,
2055                                              extent_op);
2056         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2057                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2058                                           node->num_bytes, parent,
2059                                           ref_root, ref->objectid,
2060                                           ref->offset, node->ref_mod,
2061                                           extent_op);
2062         } else {
2063                 BUG();
2064         }
2065         return ret;
2066 }
2067
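     /*
      * Fold a delayed extent op into an extent item that is already mapped:
      * update the flags and/or the first key recorded for a tree block.
      */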
2068 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2069                                     struct extent_buffer *leaf,
2070                                     struct btrfs_extent_item *ei)
2071 {
2072         u64 flags = btrfs_extent_flags(leaf, ei);
2073         if (extent_op->update_flags) {
2074                 flags |= extent_op->flags_to_set;
2075                 btrfs_set_extent_flags(leaf, ei, flags);
2076         }
2077
2078         if (extent_op->update_key) {
2079                 struct btrfs_tree_block_info *bi;
2080                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2081                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2082                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2083         }
2084 }
2085
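     /*
      * Look up the extent item for a delayed op, trying the skinny metadata
      * key first and falling back to the old EXTENT_ITEM key, then apply the
      * op and dirty the leaf.
      */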
2086 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2087                                  struct btrfs_root *root,
2088                                  struct btrfs_delayed_ref_node *node,
2089                                  struct btrfs_delayed_extent_op *extent_op)
2090 {
2091         struct btrfs_key key;
2092         struct btrfs_path *path;
2093         struct btrfs_extent_item *ei;
2094         struct extent_buffer *leaf;
2095         u32 item_size;
2096         int ret;
2097         int err = 0;
2098         int metadata = !extent_op->is_data;
2099
2100         if (trans->aborted)
2101                 return 0;
2102
2103         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2104                 metadata = 0;
2105
2106         path = btrfs_alloc_path();
2107         if (!path)
2108                 return -ENOMEM;
2109
2110         key.objectid = node->bytenr;
2111
2112         if (metadata) {
2113                 key.type = BTRFS_METADATA_ITEM_KEY;
2114                 key.offset = extent_op->level;
2115         } else {
2116                 key.type = BTRFS_EXTENT_ITEM_KEY;
2117                 key.offset = node->num_bytes;
2118         }
2119
2120 again:
2121         path->reada = 1;
2122         path->leave_spinning = 1;
2123         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2124                                 path, 0, 1);
2125         if (ret < 0) {
2126                 err = ret;
2127                 goto out;
2128         }
2129         if (ret > 0) {
2130                 if (metadata) {
2131                         btrfs_release_path(path);
2132                         metadata = 0;
2133
2134                         key.offset = node->num_bytes;
2135                         key.type = BTRFS_EXTENT_ITEM_KEY;
2136                         goto again;
2137                 }
2138                 err = -EIO;
2139                 goto out;
2140         }
2141
2142         leaf = path->nodes[0];
2143         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2144 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2145         if (item_size < sizeof(*ei)) {
2146                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2147                                              path, (u64)-1, 0);
2148                 if (ret < 0) {
2149                         err = ret;
2150                         goto out;
2151                 }
2152                 leaf = path->nodes[0];
2153                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2154         }
2155 #endif
2156         BUG_ON(item_size < sizeof(*ei));
2157         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2158         __run_delayed_extent_op(extent_op, leaf, ei);
2159
2160         btrfs_mark_buffer_dirty(leaf);
2161 out:
2162         btrfs_free_path(path);
2163         return err;
2164 }
2165
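     /*
      * Apply one delayed tree block ref.  With skinny metadata the key
      * offset carries the block level instead of the extent size.
      */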
2166 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2167                                 struct btrfs_root *root,
2168                                 struct btrfs_delayed_ref_node *node,
2169                                 struct btrfs_delayed_extent_op *extent_op,
2170                                 int insert_reserved)
2171 {
2172         int ret = 0;
2173         struct btrfs_delayed_tree_ref *ref;
2174         struct btrfs_key ins;
2175         u64 parent = 0;
2176         u64 ref_root = 0;
2177         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2178                                                  SKINNY_METADATA);
2179
2180         ref = btrfs_delayed_node_to_tree_ref(node);
2181         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2182                 parent = ref->parent;
2183         else
2184                 ref_root = ref->root;
2185
2186         ins.objectid = node->bytenr;
2187         if (skinny_metadata) {
2188                 ins.offset = ref->level;
2189                 ins.type = BTRFS_METADATA_ITEM_KEY;
2190         } else {
2191                 ins.offset = node->num_bytes;
2192                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2193         }
2194
2195         BUG_ON(node->ref_mod != 1);
2196         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2197                 BUG_ON(!extent_op || !extent_op->update_flags);
2198                 ret = alloc_reserved_tree_block(trans, root,
2199                                                 parent, ref_root,
2200                                                 extent_op->flags_to_set,
2201                                                 &extent_op->key,
2202                                                 ref->level, &ins);
2203         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2204                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2205                                              node->num_bytes, parent, ref_root,
2206                                              ref->level, 0, 1, extent_op);
2207         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2208                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2209                                           node->num_bytes, parent, ref_root,
2210                                           ref->level, 0, 1, extent_op);
2211         } else {
2212                 BUG();
2213         }
2214         return ret;
2215 }
2216
2217 /* helper function to actually process a single delayed ref entry */
2218 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2219                                struct btrfs_root *root,
2220                                struct btrfs_delayed_ref_node *node,
2221                                struct btrfs_delayed_extent_op *extent_op,
2222                                int insert_reserved)
2223 {
2224         int ret = 0;
2225
2226         if (trans->aborted)
2227                 return 0;
2228
2229         if (btrfs_delayed_ref_is_head(node)) {
2230                 struct btrfs_delayed_ref_head *head;
2231                 /*
2232                  * we've hit the end of the chain and we were supposed
2233                  * to insert this extent into the tree.  But, it got
2234                  * deleted before we ever needed to insert it, so all
2235                  * we have to do is clean up the accounting
2236                  */
2237                 BUG_ON(extent_op);
2238                 head = btrfs_delayed_node_to_head(node);
2239                 if (insert_reserved) {
2240                         btrfs_pin_extent(root, node->bytenr,
2241                                          node->num_bytes, 1);
2242                         if (head->is_data) {
2243                                 ret = btrfs_del_csums(trans, root,
2244                                                       node->bytenr,
2245                                                       node->num_bytes);
2246                         }
2247                 }
2248                 return ret;
2249         }
2250
2251         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2252             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2253                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2254                                            insert_reserved);
2255         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2256                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2257                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2258                                            insert_reserved);
2259         else
2260                 BUG();
2261         return ret;
2262 }
2263
2264 static noinline struct btrfs_delayed_ref_node *
2265 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2266 {
2267         struct rb_node *node;
2268         struct btrfs_delayed_ref_node *ref;
2269         int action = BTRFS_ADD_DELAYED_REF;
2270 again:
2271         /*
2272          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2273          * This prevents the ref count from going down to zero while
2274          * there are still pending delayed refs.
2275          */
2276         node = rb_prev(&head->node.rb_node);
2277         while (1) {
2278                 if (!node)
2279                         break;
2280                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2281                                 rb_node);
2282                 if (ref->bytenr != head->node.bytenr)
2283                         break;
2284                 if (ref->action == action)
2285                         return ref;
2286                 node = rb_prev(node);
2287         }
2288         if (action == BTRFS_ADD_DELAYED_REF) {
2289                 action = BTRFS_DROP_DELAYED_REF;
2290                 goto again;
2291         }
2292         return NULL;
2293 }
2294
2295 /*
2296  * Returns the number of refs run on success, even when called with an already
2297  * aborted transaction. Returns -ENOMEM or -EIO on failure; the transaction will be aborted.
2298  */
2299 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2300                                        struct btrfs_root *root,
2301                                        struct list_head *cluster)
2302 {
2303         struct btrfs_delayed_ref_root *delayed_refs;
2304         struct btrfs_delayed_ref_node *ref;
2305         struct btrfs_delayed_ref_head *locked_ref = NULL;
2306         struct btrfs_delayed_extent_op *extent_op;
2307         struct btrfs_fs_info *fs_info = root->fs_info;
2308         int ret;
2309         int count = 0;
2310         int must_insert_reserved = 0;
2311
2312         delayed_refs = &trans->transaction->delayed_refs;
2313         while (1) {
2314                 if (!locked_ref) {
2315                         /* pick a new head ref from the cluster list */
2316                         if (list_empty(cluster))
2317                                 break;
2318
2319                         locked_ref = list_entry(cluster->next,
2320                                      struct btrfs_delayed_ref_head, cluster);
2321
2322                         /* grab the lock that says we are going to process
2323                          * all the refs for this head */
2324                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2325
2326                         /*
2327                          * we may have dropped the spin lock to get the head
2328                          * mutex lock, and that might have given someone else
2329                          * time to free the head.  If that's true, it has been
2330                          * removed from our list and we can move on.
2331                          */
2332                         if (ret == -EAGAIN) {
2333                                 locked_ref = NULL;
2334                                 count++;
2335                                 continue;
2336                         }
2337                 }
2338
2339                 /*
2340                  * We need to try and merge add/drops of the same ref since we
2341                  * can run into issues with relocate dropping the implicit ref
2342                  * and then it being added back again before the drop can
2343                  * finish.  If we merged anything we need to re-loop so we can
2344                  * get a good ref.
2345                  */
2346                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2347                                          locked_ref);
2348
2349                 /*
2350                  * locked_ref is the head node, so we have to go one
2351                  * node back for any delayed ref updates
2352                  */
2353                 ref = select_delayed_ref(locked_ref);
2354
2355                 if (ref && ref->seq &&
2356                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2357                         /*
2358                          * there are still refs with lower seq numbers in the
2359                          * process of being added. Don't run this ref yet.
2360                          */
2361                         list_del_init(&locked_ref->cluster);
2362                         btrfs_delayed_ref_unlock(locked_ref);
2363                         locked_ref = NULL;
2364                         delayed_refs->num_heads_ready++;
2365                         spin_unlock(&delayed_refs->lock);
2366                         cond_resched();
2367                         spin_lock(&delayed_refs->lock);
2368                         continue;
2369                 }
2370
2371                 /*
2372                  * record the must insert reserved flag before we
2373                  * drop the spin lock.
2374                  */
2375                 must_insert_reserved = locked_ref->must_insert_reserved;
2376                 locked_ref->must_insert_reserved = 0;
2377
2378                 extent_op = locked_ref->extent_op;
2379                 locked_ref->extent_op = NULL;
2380
2381                 if (!ref) {
2382                         /* All delayed refs have been processed.  Go ahead
2383                          * and send the head node to run_one_delayed_ref,
2384                          * so that any accounting fixes can happen.
2385                          */
2386                         ref = &locked_ref->node;
2387
2388                         if (extent_op && must_insert_reserved) {
2389                                 btrfs_free_delayed_extent_op(extent_op);
2390                                 extent_op = NULL;
2391                         }
2392
2393                         if (extent_op) {
2394                                 spin_unlock(&delayed_refs->lock);
2395
2396                                 ret = run_delayed_extent_op(trans, root,
2397                                                             ref, extent_op);
2398                                 btrfs_free_delayed_extent_op(extent_op);
2399
2400                                 if (ret) {
2401                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2402                                         spin_lock(&delayed_refs->lock);
2403                                         btrfs_delayed_ref_unlock(locked_ref);
2404                                         return ret;
2405                                 }
2406
2407                                 goto next;
2408                         }
2409                 }
2410
2411                 ref->in_tree = 0;
2412                 rb_erase(&ref->rb_node, &delayed_refs->root);
2413                 delayed_refs->num_entries--;
2414                 if (!btrfs_delayed_ref_is_head(ref)) {
2415                         /*
2416                          * when we play the delayed ref, also correct the
2417                          * ref_mod on head
2418                          */
2419                         switch (ref->action) {
2420                         case BTRFS_ADD_DELAYED_REF:
2421                         case BTRFS_ADD_DELAYED_EXTENT:
2422                                 locked_ref->node.ref_mod -= ref->ref_mod;
2423                                 break;
2424                         case BTRFS_DROP_DELAYED_REF:
2425                                 locked_ref->node.ref_mod += ref->ref_mod;
2426                                 break;
2427                         default:
2428                                 WARN_ON(1);
2429                         }
2430                 }
2431                 spin_unlock(&delayed_refs->lock);
2432
2433                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2434                                           must_insert_reserved);
2435
2436                 btrfs_free_delayed_extent_op(extent_op);
2437                 if (ret) {
2438                         btrfs_delayed_ref_unlock(locked_ref);
2439                         btrfs_put_delayed_ref(ref);
2440                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2441                         spin_lock(&delayed_refs->lock);
2442                         return ret;
2443                 }
2444
2445                 /*
2446                  * If this node is a head, that means all the refs in this head
2447                  * have been dealt with, and we will pick the next head to deal
2448                  * with, so we must unlock the head and drop it from the cluster
2449                  * list before we release it.
2450                  */
2451                 if (btrfs_delayed_ref_is_head(ref)) {
2452                         list_del_init(&locked_ref->cluster);
2453                         btrfs_delayed_ref_unlock(locked_ref);
2454                         locked_ref = NULL;
2455                 }
2456                 btrfs_put_delayed_ref(ref);
2457                 count++;
2458 next:
2459                 cond_resched();
2460                 spin_lock(&delayed_refs->lock);
2461         }
2462         return count;
2463 }
2464
2465 #ifdef SCRAMBLE_DELAYED_REFS
2466 /*
2467  * Normally delayed refs get processed in ascending bytenr order. This
2468  * correlates in most cases to the order added. To expose dependencies on this
2469  * order, we start to process the tree in the middle instead of the beginning
2470  */
2471 static u64 find_middle(struct rb_root *root)
2472 {
2473         struct rb_node *n = root->rb_node;
2474         struct btrfs_delayed_ref_node *entry;
2475         int alt = 1;
2476         u64 middle;
2477         u64 first = 0, last = 0;
2478
2479         n = rb_first(root);
2480         if (n) {
2481                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2482                 first = entry->bytenr;
2483         }
2484         n = rb_last(root);
2485         if (n) {
2486                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2487                 last = entry->bytenr;
2488         }
2489         n = root->rb_node;
2490
2491         while (n) {
2492                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2493                 WARN_ON(!entry->in_tree);
2494
2495                 middle = entry->bytenr;
2496
2497                 if (alt)
2498                         n = n->rb_left;
2499                 else
2500                         n = n->rb_right;
2501
2502                 alt = 1 - alt;
2503         }
2504         return middle;
2505 }
2506 #endif
2507
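     /*
      * Run the qgroup accounting for every ref queued on this transaction,
      * then release the transaction's tree mod seq element.
      */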
2508 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2509                                          struct btrfs_fs_info *fs_info)
2510 {
2511         struct qgroup_update *qgroup_update;
2512         int ret = 0;
2513
2514         if (list_empty(&trans->qgroup_ref_list) !=
2515             !trans->delayed_ref_elem.seq) {
2516                 /* list without seq or seq without list */
2517                 btrfs_err(fs_info,
2518                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2519                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2520                         (u32)(trans->delayed_ref_elem.seq >> 32),
2521                         (u32)trans->delayed_ref_elem.seq);
2522                 BUG();
2523         }
2524
2525         if (!trans->delayed_ref_elem.seq)
2526                 return 0;
2527
2528         while (!list_empty(&trans->qgroup_ref_list)) {
2529                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2530                                                  struct qgroup_update, list);
2531                 list_del(&qgroup_update->list);
2532                 if (!ret)
2533                         ret = btrfs_qgroup_account_ref(
2534                                         trans, fs_info, qgroup_update->node,
2535                                         qgroup_update->extent_op);
2536                 kfree(qgroup_update);
2537         }
2538
2539         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2540
2541         return ret;
2542 }
2543
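     /*
      * Return 1 once the global ref_seq counter has left [seq, seq + count),
      * i.e. roughly @count refs have been run since @seq was sampled.
      */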
2544 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2545                       int count)
2546 {
2547         int val = atomic_read(&delayed_refs->ref_seq);
2548
2549         if (val < seq || val >= seq + count)
2550                 return 1;
2551         return 0;
2552 }
2553
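     /*
      * Rough estimate of how many extent tree leaves the given number of
      * delayed ref heads may dirty; used by the throttling check below.
      */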
2554 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2555 {
2556         u64 num_bytes;
2557
2558         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2559                              sizeof(struct btrfs_extent_inline_ref));
2560         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2561                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2562
2563         /*
2564          * We don't ever fill up leaves all the way so multiply by 2 just to be
2565          * closer to what we're really going to want to use.
2566          */
2567         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2568 }
2569
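     /*
      * Returns 1 when the global reserve looks too low to comfortably run
      * the delayed ref heads that are ready, i.e. callers should throttle.
      */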
2570 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2571                                        struct btrfs_root *root)
2572 {
2573         struct btrfs_block_rsv *global_rsv;
2574         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2575         u64 num_bytes;
2576         int ret = 0;
2577
2578         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2579         num_heads = heads_to_leaves(root, num_heads);
2580         if (num_heads > 1)
2581                 num_bytes += (num_heads - 1) * root->leafsize;
2582         num_bytes <<= 1;
2583         global_rsv = &root->fs_info->global_block_rsv;
2584
2585         /*
2586          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2587          * wiggle room since running delayed refs can create more delayed refs.
2588          */
2589         if (global_rsv->space_info->full)
2590                 num_bytes <<= 1;
2591
2592         spin_lock(&global_rsv->lock);
2593         if (global_rsv->reserved <= num_bytes)
2594                 ret = 1;
2595         spin_unlock(&global_rsv->lock);
2596         return ret;
2597 }
2598
2599 /*
2600  * this starts processing the delayed reference count updates and
2601  * extent insertions we have queued up so far.  count can be
2602  * 0, which means to process everything in the tree at the start
2603  * of the run (but not newly added entries), or it can be some target
2604  * number you'd like to process.
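      *
      * Callers that need to flush everything (e.g. at transaction commit)
      * pass (unsigned long)-1 and we keep going until the tree is empty.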
2605  *
2606  * Returns 0 on success or if called with an aborted transaction
2607  * Returns <0 on error and aborts the transaction
2608  */
2609 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2610                            struct btrfs_root *root, unsigned long count)
2611 {
2612         struct rb_node *node;
2613         struct btrfs_delayed_ref_root *delayed_refs;
2614         struct btrfs_delayed_ref_node *ref;
2615         struct list_head cluster;
2616         int ret;
2617         u64 delayed_start;
2618         int run_all = count == (unsigned long)-1;
2619         int run_most = 0;
2620         int loops;
2621
2622         /* We'll clean this up in btrfs_cleanup_transaction */
2623         if (trans->aborted)
2624                 return 0;
2625
2626         if (root == root->fs_info->extent_root)
2627                 root = root->fs_info->tree_root;
2628
2629         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2630
2631         delayed_refs = &trans->transaction->delayed_refs;
2632         INIT_LIST_HEAD(&cluster);
2633         if (count == 0) {
2634                 count = delayed_refs->num_entries * 2;
2635                 run_most = 1;
2636         }
2637
2638         if (!run_all && !run_most) {
2639                 int old;
2640                 int seq = atomic_read(&delayed_refs->ref_seq);
2641
2642 progress:
2643                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2644                 if (old) {
2645                         DEFINE_WAIT(__wait);
2646                         if (delayed_refs->flushing ||
2647                             !btrfs_should_throttle_delayed_refs(trans, root))
2648                                 return 0;
2649
2650                         prepare_to_wait(&delayed_refs->wait, &__wait,
2651                                         TASK_UNINTERRUPTIBLE);
2652
2653                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2654                         if (old) {
2655                                 schedule();
2656                                 finish_wait(&delayed_refs->wait, &__wait);
2657
2658                                 if (!refs_newer(delayed_refs, seq, 256))
2659                                         goto progress;
2660                                 else
2661                                         return 0;
2662                         } else {
2663                                 finish_wait(&delayed_refs->wait, &__wait);
2664                                 goto again;
2665                         }
2666                 }
2667
2668         } else {
2669                 atomic_inc(&delayed_refs->procs_running_refs);
2670         }
2671
2672 again:
2673         loops = 0;
2674         spin_lock(&delayed_refs->lock);
2675
2676 #ifdef SCRAMBLE_DELAYED_REFS
2677         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2678 #endif
2679
2680         while (1) {
2681                 if (!(run_all || run_most) &&
2682                     !btrfs_should_throttle_delayed_refs(trans, root))
2683                         break;
2684
2685                 /*
2686                  * go find something we can process in the rbtree.  We start at
2687                  * the beginning of the tree, and then build a cluster
2688                  * of refs to process starting at the first one we are able to
2689                  * lock.
2690                  */
2691                 delayed_start = delayed_refs->run_delayed_start;
2692                 ret = btrfs_find_ref_cluster(trans, &cluster,
2693                                              delayed_refs->run_delayed_start);
2694                 if (ret)
2695                         break;
2696
2697                 ret = run_clustered_refs(trans, root, &cluster);
2698                 if (ret < 0) {
2699                         btrfs_release_ref_cluster(&cluster);
2700                         spin_unlock(&delayed_refs->lock);
2701                         btrfs_abort_transaction(trans, root, ret);
2702                         atomic_dec(&delayed_refs->procs_running_refs);
2703                         wake_up(&delayed_refs->wait);
2704                         return ret;
2705                 }
2706
2707                 atomic_add(ret, &delayed_refs->ref_seq);
2708
2709                 count -= min_t(unsigned long, ret, count);
2710
2711                 if (count == 0)
2712                         break;
2713
2714                 if (delayed_start >= delayed_refs->run_delayed_start) {
2715                         if (loops == 0) {
2716                                 /*
2717                                  * btrfs_find_ref_cluster looped.  Let's do
2718                                  * one more cycle.  If we don't run any
2719                                  * delayed refs during that cycle (because
2720                                  * all of them are blocked), bail out.
2721                                  */
2722                                 loops = 1;
2723                         } else {
2724                                 /*
2725                                  * no runnable refs left, stop trying
2726                                  */
2727                                 BUG_ON(run_all);
2728                                 break;
2729                         }
2730                 }
2731                 if (ret) {
2732                         /* refs were run, let's reset staleness detection */
2733                         loops = 0;
2734                 }
2735         }
2736
2737         if (run_all) {
2738                 if (!list_empty(&trans->new_bgs)) {
2739                         spin_unlock(&delayed_refs->lock);
2740                         btrfs_create_pending_block_groups(trans, root);
2741                         spin_lock(&delayed_refs->lock);
2742                 }
2743
2744                 node = rb_first(&delayed_refs->root);
2745                 if (!node)
2746                         goto out;
2747                 count = (unsigned long)-1;
2748
2749                 while (node) {
2750                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2751                                        rb_node);
2752                         if (btrfs_delayed_ref_is_head(ref)) {
2753                                 struct btrfs_delayed_ref_head *head;
2754
2755                                 head = btrfs_delayed_node_to_head(ref);
2756                                 atomic_inc(&ref->refs);
2757
2758                                 spin_unlock(&delayed_refs->lock);
2759                                 /*
2760                                  * Mutex was contended, block until it's
2761                                  * released and try again
2762                                  */
2763                                 mutex_lock(&head->mutex);
2764                                 mutex_unlock(&head->mutex);
2765
2766                                 btrfs_put_delayed_ref(ref);
2767                                 cond_resched();
2768                                 goto again;
2769                         }
2770                         node = rb_next(node);
2771                 }
2772                 spin_unlock(&delayed_refs->lock);
2773                 schedule_timeout(1);
2774                 goto again;
2775         }
2776 out:
2777         atomic_dec(&delayed_refs->procs_running_refs);
2778         smp_mb();
2779         if (waitqueue_active(&delayed_refs->wait))
2780                 wake_up(&delayed_refs->wait);
2781
2782         spin_unlock(&delayed_refs->lock);
2783         assert_qgroups_uptodate(trans);
2784         return 0;
2785 }
2786
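/*
 * Editor's note: a minimal caller sketch, not part of the original file,
 * illustrating the count conventions documented above.  Passing
 * (unsigned long)-1 drains the whole tree, including refs queued while we
 * run; passing 0 processes roughly what was queued when the run started.
 */
static int __maybe_unused example_flush_delayed_refs(
                struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
        int ret;

        /* process what has been queued so far (the run_most case) */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                return ret;

        /* drain everything, e.g. just before a transaction commits */
        return btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
}
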
2787 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2788                                 struct btrfs_root *root,
2789                                 u64 bytenr, u64 num_bytes, u64 flags,
2790                                 int level, int is_data)
2791 {
2792         struct btrfs_delayed_extent_op *extent_op;
2793         int ret;
2794
2795         extent_op = btrfs_alloc_delayed_extent_op();
2796         if (!extent_op)
2797                 return -ENOMEM;
2798
2799         extent_op->flags_to_set = flags;
2800         extent_op->update_flags = 1;
2801         extent_op->update_key = 0;
2802         extent_op->is_data = is_data ? 1 : 0;
2803         extent_op->level = level;
2804
2805         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2806                                           num_bytes, extent_op);
2807         if (ret)
2808                 btrfs_free_delayed_extent_op(extent_op);
2809         return ret;
2810 }
2811
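/*
 * Editor's note: an illustrative call, not part of the original file.  The
 * COW machinery in ctree.c uses this helper in roughly this way to switch
 * a tree block to FULL_BACKREF accounting; the surrounding logic is elided.
 */
static void __maybe_unused example_set_full_backref(
                struct btrfs_trans_handle *trans, struct btrfs_root *root,
                struct extent_buffer *buf, int level)
{
        int ret;

        ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
                                          BTRFS_BLOCK_FLAG_FULL_BACKREF,
                                          level, 0);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
}
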
2812 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2813                                       struct btrfs_root *root,
2814                                       struct btrfs_path *path,
2815                                       u64 objectid, u64 offset, u64 bytenr)
2816 {
2817         struct btrfs_delayed_ref_head *head;
2818         struct btrfs_delayed_ref_node *ref;
2819         struct btrfs_delayed_data_ref *data_ref;
2820         struct btrfs_delayed_ref_root *delayed_refs;
2821         struct rb_node *node;
2822         int ret = 0;
2823
2824         ret = -ENOENT;
2825         delayed_refs = &trans->transaction->delayed_refs;
2826         spin_lock(&delayed_refs->lock);
2827         head = btrfs_find_delayed_ref_head(trans, bytenr);
2828         if (!head)
2829                 goto out;
2830
2831         if (!mutex_trylock(&head->mutex)) {
2832                 atomic_inc(&head->node.refs);
2833                 spin_unlock(&delayed_refs->lock);
2834
2835                 btrfs_release_path(path);
2836
2837                 /*
2838                  * Mutex was contended, block until it's released and let
2839                  * caller try again
2840                  */
2841                 mutex_lock(&head->mutex);
2842                 mutex_unlock(&head->mutex);
2843                 btrfs_put_delayed_ref(&head->node);
2844                 return -EAGAIN;
2845         }
2846
2847         node = rb_prev(&head->node.rb_node);
2848         if (!node)
2849                 goto out_unlock;
2850
2851         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2852
2853         if (ref->bytenr != bytenr)
2854                 goto out_unlock;
2855
2856         ret = 1;
2857         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2858                 goto out_unlock;
2859
2860         data_ref = btrfs_delayed_node_to_data_ref(ref);
2861
2862         node = rb_prev(node);
2863         if (node) {
2864                 int seq = ref->seq;
2865
2866                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2867                 if (ref->bytenr == bytenr && ref->seq == seq)
2868                         goto out_unlock;
2869         }
2870
2871         if (data_ref->root != root->root_key.objectid ||
2872             data_ref->objectid != objectid || data_ref->offset != offset)
2873                 goto out_unlock;
2874
2875         ret = 0;
2876 out_unlock:
2877         mutex_unlock(&head->mutex);
2878 out:
2879         spin_unlock(&delayed_refs->lock);
2880         return ret;
2881 }
2882
2883 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2884                                         struct btrfs_root *root,
2885                                         struct btrfs_path *path,
2886                                         u64 objectid, u64 offset, u64 bytenr)
2887 {
2888         struct btrfs_root *extent_root = root->fs_info->extent_root;
2889         struct extent_buffer *leaf;
2890         struct btrfs_extent_data_ref *ref;
2891         struct btrfs_extent_inline_ref *iref;
2892         struct btrfs_extent_item *ei;
2893         struct btrfs_key key;
2894         u32 item_size;
2895         int ret;
2896
2897         key.objectid = bytenr;
2898         key.offset = (u64)-1;
2899         key.type = BTRFS_EXTENT_ITEM_KEY;
2900
2901         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2902         if (ret < 0)
2903                 goto out;
2904         BUG_ON(ret == 0); /* Corruption */
2905
2906         ret = -ENOENT;
2907         if (path->slots[0] == 0)
2908                 goto out;
2909
2910         path->slots[0]--;
2911         leaf = path->nodes[0];
2912         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2913
2914         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2915                 goto out;
2916
2917         ret = 1;
2918         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2919 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2920         if (item_size < sizeof(*ei)) {
2921                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2922                 goto out;
2923         }
2924 #endif
2925         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2926
2927         if (item_size != sizeof(*ei) +
2928             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2929                 goto out;
2930
2931         if (btrfs_extent_generation(leaf, ei) <=
2932             btrfs_root_last_snapshot(&root->root_item))
2933                 goto out;
2934
2935         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2936         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2937             BTRFS_EXTENT_DATA_REF_KEY)
2938                 goto out;
2939
2940         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2941         if (btrfs_extent_refs(leaf, ei) !=
2942             btrfs_extent_data_ref_count(leaf, ref) ||
2943             btrfs_extent_data_ref_root(leaf, ref) !=
2944             root->root_key.objectid ||
2945             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2946             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2947                 goto out;
2948
2949         ret = 0;
2950 out:
2951         return ret;
2952 }
2953
2954 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2955                           struct btrfs_root *root,
2956                           u64 objectid, u64 offset, u64 bytenr)
2957 {
2958         struct btrfs_path *path;
2959         int ret;
2960         int ret2;
2961
2962         path = btrfs_alloc_path();
2963         if (!path)
2964                 return -ENOMEM;
2965
2966         do {
2967                 ret = check_committed_ref(trans, root, path, objectid,
2968                                           offset, bytenr);
2969                 if (ret && ret != -ENOENT)
2970                         goto out;
2971
2972                 ret2 = check_delayed_ref(trans, root, path, objectid,
2973                                          offset, bytenr);
2974         } while (ret2 == -EAGAIN);
2975
2976         if (ret2 && ret2 != -ENOENT) {
2977                 ret = ret2;
2978                 goto out;
2979         }
2980
2981         if (ret != -ENOENT || ret2 != -ENOENT)
2982                 ret = 0;
2983 out:
2984         btrfs_free_path(path);
2985         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2986                 WARN_ON(ret > 0);
2987         return ret;
2988 }
2989
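/*
 * Editor's note: a usage sketch, not part of the original file.  The nocow
 * write path calls btrfs_cross_ref_exist() roughly like this: any nonzero
 * return (a reference found, or an error) means the extent may be shared,
 * so the write must fall back to COW.
 */
static bool __maybe_unused example_can_nocow(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             u64 ino, u64 file_offset,
                                             u64 disk_bytenr)
{
        if (btrfs_cross_ref_exist(trans, root, ino, file_offset, disk_bytenr))
                return false;   /* shared (or unsure): COW instead */
        return true;            /* sole owner: safe to overwrite in place */
}
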
2990 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2991                            struct btrfs_root *root,
2992                            struct extent_buffer *buf,
2993                            int full_backref, int inc, int for_cow)
2994 {
2995         u64 bytenr;
2996         u64 num_bytes;
2997         u64 parent;
2998         u64 ref_root;
2999         u32 nritems;
3000         struct btrfs_key key;
3001         struct btrfs_file_extent_item *fi;
3002         int i;
3003         int level;
3004         int ret = 0;
3005         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3006                             u64, u64, u64, u64, u64, u64, int);
3007
3008         ref_root = btrfs_header_owner(buf);
3009         nritems = btrfs_header_nritems(buf);
3010         level = btrfs_header_level(buf);
3011
3012         if (!root->ref_cows && level == 0)
3013                 return 0;
3014
3015         if (inc)
3016                 process_func = btrfs_inc_extent_ref;
3017         else
3018                 process_func = btrfs_free_extent;
3019
3020         if (full_backref)
3021                 parent = buf->start;
3022         else
3023                 parent = 0;
3024
3025         for (i = 0; i < nritems; i++) {
3026                 if (level == 0) {
3027                         btrfs_item_key_to_cpu(buf, &key, i);
3028                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3029                                 continue;
3030                         fi = btrfs_item_ptr(buf, i,
3031                                             struct btrfs_file_extent_item);
3032                         if (btrfs_file_extent_type(buf, fi) ==
3033                             BTRFS_FILE_EXTENT_INLINE)
3034                                 continue;
3035                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3036                         if (bytenr == 0)
3037                                 continue;
3038
3039                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3040                         key.offset -= btrfs_file_extent_offset(buf, fi);
3041                         ret = process_func(trans, root, bytenr, num_bytes,
3042                                            parent, ref_root, key.objectid,
3043                                            key.offset, for_cow);
3044                         if (ret)
3045                                 goto fail;
3046                 } else {
3047                         bytenr = btrfs_node_blockptr(buf, i);
3048                         num_bytes = btrfs_level_size(root, level - 1);
3049                         ret = process_func(trans, root, bytenr, num_bytes,
3050                                            parent, ref_root, level - 1, 0,
3051                                            for_cow);
3052                         if (ret)
3053                                 goto fail;
3054                 }
3055         }
3056         return 0;
3057 fail:
3058         return ret;
3059 }
3060
3061 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3062                   struct extent_buffer *buf, int full_backref, int for_cow)
3063 {
3064         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3065 }
3066
3067 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3068                   struct extent_buffer *buf, int full_backref, int for_cow)
3069 {
3070         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3071 }
3072
3073 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3074                                  struct btrfs_root *root,
3075                                  struct btrfs_path *path,
3076                                  struct btrfs_block_group_cache *cache)
3077 {
3078         int ret;
3079         struct btrfs_root *extent_root = root->fs_info->extent_root;
3080         unsigned long bi;
3081         struct extent_buffer *leaf;
3082
3083         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3084         if (ret < 0)
3085                 goto fail;
3086         BUG_ON(ret); /* Corruption */
3087
3088         leaf = path->nodes[0];
3089         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3090         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3091         btrfs_mark_buffer_dirty(leaf);
3092         btrfs_release_path(path);
3093 fail:
3094         if (ret) {
3095                 btrfs_abort_transaction(trans, root, ret);
3096                 return ret;
3097         }
3098         return 0;
3099
3100 }
3101
3102 static struct btrfs_block_group_cache *
3103 next_block_group(struct btrfs_root *root,
3104                  struct btrfs_block_group_cache *cache)
3105 {
3106         struct rb_node *node;
3107         spin_lock(&root->fs_info->block_group_cache_lock);
3108         node = rb_next(&cache->cache_node);
3109         btrfs_put_block_group(cache);
3110         if (node) {
3111                 cache = rb_entry(node, struct btrfs_block_group_cache,
3112                                  cache_node);
3113                 btrfs_get_block_group(cache);
3114         } else
3115                 cache = NULL;
3116         spin_unlock(&root->fs_info->block_group_cache_lock);
3117         return cache;
3118 }
3119
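/*
 * Editor's note: an iteration sketch, not part of the original file.
 * next_block_group() drops the reference on the cache it is handed and
 * returns the next one with a reference already taken, so a full scan
 * (mirroring the loops in btrfs_write_dirty_block_groups() below) is:
 */
static void __maybe_unused example_scan_block_groups(struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache;

        cache = btrfs_lookup_first_block_group(root->fs_info, 0);
        while (cache) {
                /* inspect cache; no btrfs_put_block_group() needed here */
                cache = next_block_group(root, cache);
        }
}
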
3120 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3121                             struct btrfs_trans_handle *trans,
3122                             struct btrfs_path *path)
3123 {
3124         struct btrfs_root *root = block_group->fs_info->tree_root;
3125         struct inode *inode = NULL;
3126         u64 alloc_hint = 0;
3127         int dcs = BTRFS_DC_ERROR;
3128         int num_pages = 0;
3129         int retries = 0;
3130         int ret = 0;
3131
3132         /*
3133          * If this block group is smaller than 100 megs, don't bother caching the
3134          * block group.
3135          */
3136         if (block_group->key.offset < (100 * 1024 * 1024)) {
3137                 spin_lock(&block_group->lock);
3138                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3139                 spin_unlock(&block_group->lock);
3140                 return 0;
3141         }
3142
3143 again:
3144         inode = lookup_free_space_inode(root, block_group, path);
3145         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3146                 ret = PTR_ERR(inode);
3147                 btrfs_release_path(path);
3148                 goto out;
3149         }
3150
3151         if (IS_ERR(inode)) {
3152                 BUG_ON(retries);
3153                 retries++;
3154
3155                 if (block_group->ro)
3156                         goto out_free;
3157
3158                 ret = create_free_space_inode(root, trans, block_group, path);
3159                 if (ret)
3160                         goto out_free;
3161                 goto again;
3162         }
3163
3164         /* We've already set up this transaction, go ahead and exit */
3165         if (block_group->cache_generation == trans->transid &&
3166             i_size_read(inode)) {
3167                 dcs = BTRFS_DC_SETUP;
3168                 goto out_put;
3169         }
3170
3171         /*
3172          * We want to set the generation to 0 so that if anything goes wrong
3173          * from here on out we know not to trust this cache when we load up next
3174          * time.
3175          */
3176         BTRFS_I(inode)->generation = 0;
3177         ret = btrfs_update_inode(trans, root, inode);
3178         WARN_ON(ret);
3179
3180         if (i_size_read(inode) > 0) {
3181                 ret = btrfs_check_trunc_cache_free_space(root,
3182                                         &root->fs_info->global_block_rsv);
3183                 if (ret)
3184                         goto out_put;
3185
3186                 ret = btrfs_truncate_free_space_cache(root, trans, path,
3187                                                       inode);
3188                 if (ret)
3189                         goto out_put;
3190         }
3191
3192         spin_lock(&block_group->lock);
3193         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3194             !btrfs_test_opt(root, SPACE_CACHE)) {
3195                 /*
3196                  * don't bother trying to write stuff out _if_
3197                  * a) we're not cached,
3198                  * b) we're mounted with the nospace_cache option.
3199                  */
3200                 dcs = BTRFS_DC_WRITTEN;
3201                 spin_unlock(&block_group->lock);
3202                 goto out_put;
3203         }
3204         spin_unlock(&block_group->lock);
3205
3206         /*
3207          * Try to preallocate enough space based on how big the block group is.
3208          * Keep in mind this has to include any pinned space which could end up
3209          * taking up quite a bit since it's not folded into the other space
3210          * cache.
3211          */
3212         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3213         if (!num_pages)
3214                 num_pages = 1;
3215
3216         num_pages *= 16;
3217         num_pages *= PAGE_CACHE_SIZE;
3218
3219         ret = btrfs_check_data_free_space(inode, num_pages);
3220         if (ret)
3221                 goto out_put;
3222
3223         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3224                                               num_pages, num_pages,
3225                                               &alloc_hint);
3226         if (!ret)
3227                 dcs = BTRFS_DC_SETUP;
3228         btrfs_free_reserved_data_space(inode, num_pages);
3229
3230 out_put:
3231         iput(inode);
3232 out_free:
3233         btrfs_release_path(path);
3234 out:
3235         spin_lock(&block_group->lock);
3236         if (!ret && dcs == BTRFS_DC_SETUP)
3237                 block_group->cache_generation = trans->transid;
3238         block_group->disk_cache_state = dcs;
3239         spin_unlock(&block_group->lock);
3240
3241         return ret;
3242 }
3243
3244 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3245                                    struct btrfs_root *root)
3246 {
3247         struct btrfs_block_group_cache *cache;
3248         int err = 0;
3249         struct btrfs_path *path;
3250         u64 last = 0;
3251
3252         path = btrfs_alloc_path();
3253         if (!path)
3254                 return -ENOMEM;
3255
3256 again:
3257         while (1) {
3258                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3259                 while (cache) {
3260                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3261                                 break;
3262                         cache = next_block_group(root, cache);
3263                 }
3264                 if (!cache) {
3265                         if (last == 0)
3266                                 break;
3267                         last = 0;
3268                         continue;
3269                 }
3270                 err = cache_save_setup(cache, trans, path);
3271                 last = cache->key.objectid + cache->key.offset;
3272                 btrfs_put_block_group(cache);
3273         }
3274
3275         while (1) {
3276                 if (last == 0) {
3277                         err = btrfs_run_delayed_refs(trans, root,
3278                                                      (unsigned long)-1);
3279                         if (err) /* File system offline */
3280                                 goto out;
3281                 }
3282
3283                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3284                 while (cache) {
3285                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3286                                 btrfs_put_block_group(cache);
3287                                 goto again;
3288                         }
3289
3290                         if (cache->dirty)
3291                                 break;
3292                         cache = next_block_group(root, cache);
3293                 }
3294                 if (!cache) {
3295                         if (last == 0)
3296                                 break;
3297                         last = 0;
3298                         continue;
3299                 }
3300
3301                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3302                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3303                 cache->dirty = 0;
3304                 last = cache->key.objectid + cache->key.offset;
3305
3306                 err = write_one_cache_group(trans, root, path, cache);
3307                 if (err) /* File system offline */
3308                         goto out;
3309
3310                 btrfs_put_block_group(cache);
3311         }
3312
3313         while (1) {
3314                 /*
3315                  * I don't think this is needed since we're just marking our
3316                  * preallocated extent as written, but it can't hurt just in
3317                  * case.
3318                  */
3319                 if (last == 0) {
3320                         err = btrfs_run_delayed_refs(trans, root,
3321                                                      (unsigned long)-1);
3322                         if (err) /* File system offline */
3323                                 goto out;
3324                 }
3325
3326                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3327                 while (cache) {
3328                         /*
3329                          * Really this shouldn't happen, but it could if we
3330                          * couldn't write the entire preallocated extent and
3331                          * splitting the extent resulted in a new block.
3332                          */
3333                         if (cache->dirty) {
3334                                 btrfs_put_block_group(cache);
3335                                 goto again;
3336                         }
3337                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3338                                 break;
3339                         cache = next_block_group(root, cache);
3340                 }
3341                 if (!cache) {
3342                         if (last == 0)
3343                                 break;
3344                         last = 0;
3345                         continue;
3346                 }
3347
3348                 err = btrfs_write_out_cache(root, trans, cache, path);
3349
3350                 /*
3351                  * If we didn't have an error then the cache state is still
3352                  * NEED_WRITE, so we can set it to WRITTEN.
3353                  */
3354                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3355                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3356                 last = cache->key.objectid + cache->key.offset;
3357                 btrfs_put_block_group(cache);
3358         }
3359 out:
3360
3361         btrfs_free_path(path);
3362         return err;
3363 }
3364
3365 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3366 {
3367         struct btrfs_block_group_cache *block_group;
3368         int readonly = 0;
3369
3370         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3371         if (!block_group || block_group->ro)
3372                 readonly = 1;
3373         if (block_group)
3374                 btrfs_put_block_group(block_group);
3375         return readonly;
3376 }
3377
3378 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3379                              u64 total_bytes, u64 bytes_used,
3380                              struct btrfs_space_info **space_info)
3381 {
3382         struct btrfs_space_info *found;
3383         int i;
3384         int factor;
3385         int ret;
3386
3387         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3388                      BTRFS_BLOCK_GROUP_RAID10))
3389                 factor = 2;
3390         else
3391                 factor = 1;
3392
3393         found = __find_space_info(info, flags);
3394         if (found) {
3395                 spin_lock(&found->lock);
3396                 found->total_bytes += total_bytes;
3397                 found->disk_total += total_bytes * factor;
3398                 found->bytes_used += bytes_used;
3399                 found->disk_used += bytes_used * factor;
3400                 found->full = 0;
3401                 spin_unlock(&found->lock);
3402                 *space_info = found;
3403                 return 0;
3404         }
3405         found = kzalloc(sizeof(*found), GFP_NOFS);
3406         if (!found)
3407                 return -ENOMEM;
3408
3409         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3410         if (ret) {
3411                 kfree(found);
3412                 return ret;
3413         }
3414
3415         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3416                 INIT_LIST_HEAD(&found->block_groups[i]);
3417         init_rwsem(&found->groups_sem);
3418         spin_lock_init(&found->lock);
3419         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3420         found->total_bytes = total_bytes;
3421         found->disk_total = total_bytes * factor;
3422         found->bytes_used = bytes_used;
3423         found->disk_used = bytes_used * factor;
3424         found->bytes_pinned = 0;
3425         found->bytes_reserved = 0;
3426         found->bytes_readonly = 0;
3427         found->bytes_may_use = 0;
3428         found->full = 0;
3429         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3430         found->chunk_alloc = 0;
3431         found->flush = 0;
3432         init_waitqueue_head(&found->wait);
3433         *space_info = found;
3434         list_add_rcu(&found->list, &info->space_info);
3435         if (flags & BTRFS_BLOCK_GROUP_DATA)
3436                 info->data_sinfo = found;
3437         return 0;
3438 }
3439
3440 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3441 {
3442         u64 extra_flags = chunk_to_extended(flags) &
3443                                 BTRFS_EXTENDED_PROFILE_MASK;
3444
3445         write_seqlock(&fs_info->profiles_lock);
3446         if (flags & BTRFS_BLOCK_GROUP_DATA)
3447                 fs_info->avail_data_alloc_bits |= extra_flags;
3448         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3449                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3450         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3451                 fs_info->avail_system_alloc_bits |= extra_flags;
3452         write_sequnlock(&fs_info->profiles_lock);
3453 }
3454
3455 /*
3456  * returns target flags in extended format or 0 if restripe for this
3457  * chunk_type is not in progress
3458  *
3459  * should be called with either volume_mutex or balance_lock held
3460  */
3461 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3462 {
3463         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3464         u64 target = 0;
3465
3466         if (!bctl)
3467                 return 0;
3468
3469         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3470             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3471                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3472         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3473                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3474                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3475         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3476                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3477                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3478         }
3479
3480         return target;
3481 }
3482
3483 /*
3484  * @flags: available profiles in extended format (see ctree.h)
3485  *
3486  * Returns reduced profile in chunk format.  If profile changing is in
3487  * progress (either running or paused) picks the target profile (if it's
3488  * already available), otherwise falls back to plain reducing.
3489  */
3490 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3491 {
3492         /*
3493          * we add in the count of missing devices because we want
3494          * to make sure that any RAID levels on a degraded FS
3495          * continue to be honored.
3496          */
3497         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3498                 root->fs_info->fs_devices->missing_devices;
3499         u64 target;
3500         u64 tmp;
3501
3502         /*
3503          * see if restripe for this chunk_type is in progress, if so
3504          * try to reduce to the target profile
3505          */
3506         spin_lock(&root->fs_info->balance_lock);
3507         target = get_restripe_target(root->fs_info, flags);
3508         if (target) {
3509                 /* pick target profile only if it's already available */
3510                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3511                         spin_unlock(&root->fs_info->balance_lock);
3512                         return extended_to_chunk(target);
3513                 }
3514         }
3515         spin_unlock(&root->fs_info->balance_lock);
3516
3517         /* First, mask out the RAID levels which aren't possible */
3518         if (num_devices == 1)
3519                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3520                            BTRFS_BLOCK_GROUP_RAID5);
3521         if (num_devices < 3)
3522                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3523         if (num_devices < 4)
3524                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3525
3526         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3527                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3528                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3529         flags &= ~tmp;
3530
3531         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3532                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3533         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3534                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3535         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3536                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3537         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3538                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3539         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3540                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3541
3542         return extended_to_chunk(flags | tmp);
3543 }
3544
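/*
 * Editor's note: a worked example, not part of the original file.  On a
 * filesystem down to a single rw device, an extended profile of
 * RAID1|RAID0 reduces to plain single chunks:
 *
 *     flags = RAID1 | RAID0;
 *     flags &= ~(RAID1 | RAID0 | RAID5);   num_devices == 1
 *     tmp = 0;                             no profile bit survives
 *     extended_to_chunk(flags | tmp)       -> SINGLE allocation
 *
 * With two devices, RAID1 survives the mask and wins the priority chain
 * above (RAID6 > RAID5 > RAID10 > RAID1 > RAID0).
 */
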
3545 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3546 {
3547         unsigned seq;
3548
3549         do {
3550                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3551
3552                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3553                         flags |= root->fs_info->avail_data_alloc_bits;
3554                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3555                         flags |= root->fs_info->avail_system_alloc_bits;
3556                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3557                         flags |= root->fs_info->avail_metadata_alloc_bits;
3558         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3559
3560         return btrfs_reduce_alloc_profile(root, flags);
3561 }
3562
3563 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3564 {
3565         u64 flags;
3566         u64 ret;
3567
3568         if (data)
3569                 flags = BTRFS_BLOCK_GROUP_DATA;
3570         else if (root == root->fs_info->chunk_root)
3571                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3572         else
3573                 flags = BTRFS_BLOCK_GROUP_METADATA;
3574
3575         ret = get_alloc_profile(root, flags);
3576         return ret;
3577 }
3578
3579 /*
3580  * This will check the space that the inode allocates from to make sure we have
3581  * enough space for the requested bytes.
3582  */
3583 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3584 {
3585         struct btrfs_space_info *data_sinfo;
3586         struct btrfs_root *root = BTRFS_I(inode)->root;
3587         struct btrfs_fs_info *fs_info = root->fs_info;
3588         u64 used;
3589         int ret = 0, committed = 0, alloc_chunk = 1;
3590
3591         /* make sure bytes are sectorsize aligned */
3592         bytes = ALIGN(bytes, root->sectorsize);
3593
3594         if (root == root->fs_info->tree_root ||
3595             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3596                 alloc_chunk = 0;
3597                 committed = 1;
3598         }
3599
3600         data_sinfo = fs_info->data_sinfo;
3601         if (!data_sinfo)
3602                 goto alloc;
3603
3604 again:
3605         /* make sure we have enough space to handle the data first */
3606         spin_lock(&data_sinfo->lock);
3607         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3608                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3609                 data_sinfo->bytes_may_use;
3610
3611         if (used + bytes > data_sinfo->total_bytes) {
3612                 struct btrfs_trans_handle *trans;
3613
3614                 /*
3615                  * if we don't have enough free bytes in this space then we need
3616                  * to alloc a new chunk.
3617                  */
3618                 if (!data_sinfo->full && alloc_chunk) {
3619                         u64 alloc_target;
3620
3621                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3622                         spin_unlock(&data_sinfo->lock);
3623 alloc:
3624                         alloc_target = btrfs_get_alloc_profile(root, 1);
3625                         trans = btrfs_join_transaction(root);
3626                         if (IS_ERR(trans))
3627                                 return PTR_ERR(trans);
3628
3629                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3630                                              alloc_target,
3631                                              CHUNK_ALLOC_NO_FORCE);
3632                         btrfs_end_transaction(trans, root);
3633                         if (ret < 0) {
3634                                 if (ret != -ENOSPC)
3635                                         return ret;
3636                                 else
3637                                         goto commit_trans;
3638                         }
3639
3640                         if (!data_sinfo)
3641                                 data_sinfo = fs_info->data_sinfo;
3642
3643                         goto again;
3644                 }
3645
3646                 /*
3647                  * If we don't have enough pinned space to deal with this
3648                  * allocation, don't bother committing the transaction.
3649                  */
3650                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3651                                            bytes) < 0)
3652                         committed = 1;
3653                 spin_unlock(&data_sinfo->lock);
3654
3655                 /* commit the current transaction and try again */
3656 commit_trans:
3657                 if (!committed &&
3658                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3659                         committed = 1;
3660
3661                         trans = btrfs_join_transaction(root);
3662                         if (IS_ERR(trans))
3663                                 return PTR_ERR(trans);
3664                         ret = btrfs_commit_transaction(trans, root);
3665                         if (ret)
3666                                 return ret;
3667                         goto again;
3668                 }
3669
3670                 return -ENOSPC;
3671         }
3672         data_sinfo->bytes_may_use += bytes;
3673         trace_btrfs_space_reservation(root->fs_info, "space_info",
3674                                       data_sinfo->flags, bytes, 1);
3675         spin_unlock(&data_sinfo->lock);
3676
3677         return 0;
3678 }
3679
3680 /*
3681  * Called if we need to clear a data reservation for this inode.
3682  */
3683 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3684 {
3685         struct btrfs_root *root = BTRFS_I(inode)->root;
3686         struct btrfs_space_info *data_sinfo;
3687
3688         /* make sure bytes are sectorsize aligned */
3689         bytes = ALIGN(bytes, root->sectorsize);
3690
3691         data_sinfo = root->fs_info->data_sinfo;
3692         spin_lock(&data_sinfo->lock);
3693         WARN_ON(data_sinfo->bytes_may_use < bytes);
3694         data_sinfo->bytes_may_use -= bytes;
3695         trace_btrfs_space_reservation(root->fs_info, "space_info",
3696                                       data_sinfo->flags, bytes, 0);
3697         spin_unlock(&data_sinfo->lock);
3698 }
3699
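/*
 * Editor's note: a usage sketch, not part of the original file.  A data
 * reservation taken with btrfs_check_data_free_space() is paired with
 * btrfs_free_reserved_data_space() when the space ends up unused, as
 * cache_save_setup() above does around its preallocation.
 */
static int __maybe_unused example_reserve_data(struct inode *inode, u64 bytes)
{
        int ret;

        ret = btrfs_check_data_free_space(inode, bytes);
        if (ret)
                return ret;     /* ENOSPC: nothing was reserved */

        /* ... try the allocation or write here ... */

        /* on failure or leftover space, release the reservation */
        btrfs_free_reserved_data_space(inode, bytes);
        return 0;
}
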
3700 static void force_metadata_allocation(struct btrfs_fs_info *info)
3701 {
3702         struct list_head *head = &info->space_info;
3703         struct btrfs_space_info *found;
3704
3705         rcu_read_lock();
3706         list_for_each_entry_rcu(found, head, list) {
3707                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3708                         found->force_alloc = CHUNK_ALLOC_FORCE;
3709         }
3710         rcu_read_unlock();
3711 }
3712
3713 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3714 {
3715         return (global->size << 1);
3716 }
3717
3718 static int should_alloc_chunk(struct btrfs_root *root,
3719                               struct btrfs_space_info *sinfo, int force)
3720 {
3721         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3722         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3723         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3724         u64 thresh;
3725
3726         if (force == CHUNK_ALLOC_FORCE)
3727                 return 1;
3728
3729         /*
3730          * We need to take into account the global rsv because for all intents
3731          * and purposes it's used space.  Don't worry about locking the
3732          * global_rsv, it doesn't change except when the transaction commits.
3733          */
3734         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3735                 num_allocated += calc_global_rsv_need_space(global_rsv);
3736
3737         /*
3738          * in limited mode, we want to have some free space up to
3739          * about 1% of the FS size.
3740          */
3741         if (force == CHUNK_ALLOC_LIMITED) {
3742                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3743                 thresh = max_t(u64, 64 * 1024 * 1024,
3744                                div_factor_fine(thresh, 1));
3745
3746                 if (num_bytes - num_allocated < thresh)
3747                         return 1;
3748         }
3749
3750         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3751                 return 0;
3752         return 1;
3753 }
3754
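/*
 * Editor's note: a worked example, not part of the original file.  With
 * CHUNK_ALLOC_LIMITED on a 1 TiB filesystem, thresh = max(64 MiB, 1% of
 * 1 TiB) ~= 10.2 GiB, so we allocate whenever this space_info has less
 * than ~10.2 GiB of room left (num_bytes - num_allocated < thresh).  In
 * the default NO_FORCE case, the final test allocates once roughly 80%
 * of num_bytes is in use (the 2 MiB term is just slack).
 */
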
3755 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3756 {
3757         u64 num_dev;
3758
3759         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3760                     BTRFS_BLOCK_GROUP_RAID0 |
3761                     BTRFS_BLOCK_GROUP_RAID5 |
3762                     BTRFS_BLOCK_GROUP_RAID6))
3763                 num_dev = root->fs_info->fs_devices->rw_devices;
3764         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3765                 num_dev = 2;
3766         else
3767                 num_dev = 1;    /* DUP or single */
3768
3769         /* metadata for updating devices and the chunk tree */
3770         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3771 }
3772
3773 static void check_system_chunk(struct btrfs_trans_handle *trans,
3774                                struct btrfs_root *root, u64 type)
3775 {
3776         struct btrfs_space_info *info;
3777         u64 left;
3778         u64 thresh;
3779
3780         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3781         spin_lock(&info->lock);
3782         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3783                 info->bytes_reserved - info->bytes_readonly;
3784         spin_unlock(&info->lock);
3785
3786         thresh = get_system_chunk_thresh(root, type);
3787         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3788                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3789                         left, thresh, type);
3790                 dump_space_info(info, 0, 0);
3791         }
3792
3793         if (left < thresh) {
3794                 u64 flags;
3795
3796                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3797                 btrfs_alloc_chunk(trans, root, flags);
3798         }
3799 }
3800
3801 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3802                           struct btrfs_root *extent_root, u64 flags, int force)
3803 {
3804         struct btrfs_space_info *space_info;
3805         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3806         int wait_for_alloc = 0;
3807         int ret = 0;
3808
3809         /* Don't re-enter if we're already allocating a chunk */
3810         if (trans->allocating_chunk)
3811                 return -ENOSPC;
3812
3813         space_info = __find_space_info(extent_root->fs_info, flags);
3814         if (!space_info) {
3815                 ret = update_space_info(extent_root->fs_info, flags,
3816                                         0, 0, &space_info);
3817                 BUG_ON(ret); /* -ENOMEM */
3818         }
3819         BUG_ON(!space_info); /* Logic error */
3820
3821 again:
3822         spin_lock(&space_info->lock);
3823         if (force < space_info->force_alloc)
3824                 force = space_info->force_alloc;
3825         if (space_info->full) {
3826                 spin_unlock(&space_info->lock);
3827                 return 0;
3828         }
3829
3830         if (!should_alloc_chunk(extent_root, space_info, force)) {
3831                 spin_unlock(&space_info->lock);
3832                 return 0;
3833         } else if (space_info->chunk_alloc) {
3834                 wait_for_alloc = 1;
3835         } else {
3836                 space_info->chunk_alloc = 1;
3837         }
3838
3839         spin_unlock(&space_info->lock);
3840
3841         mutex_lock(&fs_info->chunk_mutex);
3842
3843         /*
3844          * The chunk_mutex is held throughout the entirety of a chunk
3845          * allocation, so once we've acquired the chunk_mutex we know that the
3846          * other guy is done and we need to recheck and see if we should
3847          * allocate.
3848          */
3849         if (wait_for_alloc) {
3850                 mutex_unlock(&fs_info->chunk_mutex);
3851                 wait_for_alloc = 0;
3852                 goto again;
3853         }
3854
3855         trans->allocating_chunk = true;
3856
3857         /*
3858          * If we have mixed data/metadata chunks we want to make sure we keep
3859          * allocating mixed chunks instead of individual chunks.
3860          */
3861         if (btrfs_mixed_space_info(space_info))
3862                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3863
3864         /*
3865          * if we're doing a data chunk, go ahead and make sure that
3866          * we keep a reasonable number of metadata chunks allocated in the
3867          * FS as well.
3868          */
3869         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3870                 fs_info->data_chunk_allocations++;
3871                 if (!(fs_info->data_chunk_allocations %
3872                       fs_info->metadata_ratio))
3873                         force_metadata_allocation(fs_info);
3874         }
3875
3876         /*
3877          * Check if we have enough space in SYSTEM chunk because we may need
3878          * to update devices.
3879          */
3880         check_system_chunk(trans, extent_root, flags);
3881
3882         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3883         trans->allocating_chunk = false;
3884
3885         spin_lock(&space_info->lock);
3886         if (ret < 0 && ret != -ENOSPC)
3887                 goto out;
3888         if (ret)
3889                 space_info->full = 1;
3890         else
3891                 ret = 1;
3892
3893         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3894 out:
3895         space_info->chunk_alloc = 0;
3896         spin_unlock(&space_info->lock);
3897         mutex_unlock(&fs_info->chunk_mutex);
3898         return ret;
3899 }
3900
3901 static int can_overcommit(struct btrfs_root *root,
3902                           struct btrfs_space_info *space_info, u64 bytes,
3903                           enum btrfs_reserve_flush_enum flush)
3904 {
3905         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3906         u64 profile = btrfs_get_alloc_profile(root, 0);
3907         u64 space_size;
3908         u64 avail;
3909         u64 used;
3910         u64 to_add;
3911
3912         used = space_info->bytes_used + space_info->bytes_reserved +
3913                 space_info->bytes_pinned + space_info->bytes_readonly;
3914
3915         /*
3916          * We only want to allow overcommitting if we have lots of actual space
3917          * free, but if we don't have enough space to handle the global reserve
3918          * space then we could end up having a real enospc problem when trying
3919          * to allocate a chunk or some other such important allocation.
3920          */
3921         spin_lock(&global_rsv->lock);
3922         space_size = calc_global_rsv_need_space(global_rsv);
3923         spin_unlock(&global_rsv->lock);
3924         if (used + space_size >= space_info->total_bytes)
3925                 return 0;
3926
3927         used += space_info->bytes_may_use;
3928
3929         spin_lock(&root->fs_info->free_chunk_lock);
3930         avail = root->fs_info->free_chunk_space;
3931         spin_unlock(&root->fs_info->free_chunk_lock);
3932
3933         /*
3934          * If we have dup, raid1 or raid10 then only half of the free
3935          * space is actually usable.  For raid56, the space info used
3936          * doesn't include the parity drive, so we don't have to
3937          * change the math
3938          */
3939         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3940                        BTRFS_BLOCK_GROUP_RAID1 |
3941                        BTRFS_BLOCK_GROUP_RAID10))
3942                 avail >>= 1;
3943
3944         to_add = space_info->total_bytes;
3945
3946         /*
3947          * If we aren't flushing all things, let us overcommit up to
3948          * half of the space. If we can flush, don't let us overcommit
3949          * too much, let it overcommit up to 1/8 of the space.
3950          */
3951         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3952                 to_add >>= 3;
3953         else
3954                 to_add >>= 1;
3955
3956         /*
3957          * Limit the overcommit to the amount of free space we could possibly
3958          * allocate for chunks.
3959          */
3960         to_add = min(avail, to_add);
3961
3962         if (used + bytes < space_info->total_bytes + to_add)
3963                 return 1;
3964         return 0;
3965 }
3966
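/*
 * Editor's note: a worked example, not part of the original file.  Take a
 * metadata space_info with total_bytes = 8 GiB, 4 GiB of used + reserved +
 * pinned + readonly, bytes_may_use = 2 GiB, a single profile, 20 GiB of
 * unallocated disk, and a global rsv needing 1 GiB.  The first gate passes
 * (4 + 1 < 8), and used becomes 6 GiB.  With BTRFS_RESERVE_FLUSH_ALL,
 * to_add = min(20 GiB, 8 GiB >> 3) = 1 GiB, so the reservation is allowed
 * iff 6 GiB + bytes < 9 GiB, i.e. bytes < 3 GiB.
 */
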
3967 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3968                                          unsigned long nr_pages)
3969 {
3970         struct super_block *sb = root->fs_info->sb;
3971
3972         if (down_read_trylock(&sb->s_umount)) {
3973                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3974                 up_read(&sb->s_umount);
3975         } else {
3976                 /*
3977                  * We needn't worry about the filesystem going from r/w to r/o
3978                  * even though we don't acquire the ->s_umount mutex, because
3979                  * the filesystem should guarantee that the delalloc inode list
3980                  * is empty once the filesystem is read-only (all dirty pages
3981                  * have been written to disk).
3982                  */
3983                 btrfs_start_all_delalloc_inodes(root->fs_info, 0);
3984                 if (!current->journal_info)
3985                         btrfs_wait_all_ordered_extents(root->fs_info, 0);
3986         }
3987 }
3988
3989 /*
3990  * shrink metadata reservation for delalloc
3991  */
3992 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3993                             bool wait_ordered)
3994 {
3995         struct btrfs_block_rsv *block_rsv;
3996         struct btrfs_space_info *space_info;
3997         struct btrfs_trans_handle *trans;
3998         u64 delalloc_bytes;
3999         u64 max_reclaim;
4000         long time_left;
4001         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
4002         int loops = 0;
4003         enum btrfs_reserve_flush_enum flush;
4004
4005         trans = (struct btrfs_trans_handle *)current->journal_info;
4006         block_rsv = &root->fs_info->delalloc_block_rsv;
4007         space_info = block_rsv->space_info;
4008
4009         smp_mb();
4010         delalloc_bytes = percpu_counter_sum_positive(
4011                                                 &root->fs_info->delalloc_bytes);
4012         if (delalloc_bytes == 0) {
4013                 if (trans)
4014                         return;
4015                 btrfs_wait_all_ordered_extents(root->fs_info, 0);
4016                 return;
4017         }
4018
4019         while (delalloc_bytes && loops < 3) {
4020                 max_reclaim = min(delalloc_bytes, to_reclaim);
4021                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4022                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4023                 /*
4024                  * We need to wait for the async pages to actually start before
4025                  * we do anything.
4026                  */
4027                 wait_event(root->fs_info->async_submit_wait,
4028                            !atomic_read(&root->fs_info->async_delalloc_pages));
4029
4030                 if (!trans)
4031                         flush = BTRFS_RESERVE_FLUSH_ALL;
4032                 else
4033                         flush = BTRFS_RESERVE_NO_FLUSH;
4034                 spin_lock(&space_info->lock);
4035                 if (can_overcommit(root, space_info, orig, flush)) {
4036                         spin_unlock(&space_info->lock);
4037                         break;
4038                 }
4039                 spin_unlock(&space_info->lock);
4040
4041                 loops++;
4042                 if (wait_ordered && !trans) {
4043                         btrfs_wait_all_ordered_extents(root->fs_info, 0);
4044                 } else {
4045                         time_left = schedule_timeout_killable(1);
4046                         if (time_left)
4047                                 break;
4048                 }
4049                 smp_mb();
4050                 delalloc_bytes = percpu_counter_sum_positive(
4051                                                 &root->fs_info->delalloc_bytes);
4052         }
4053 }
4054
4055 /**
4056  * may_commit_transaction - possibly commit the transaction if it's ok to
4057  * @root - the root we're allocating for
4058  * @bytes - the number of bytes we want to reserve
4059  * @force - force the commit
4060  *
4061  * This will check to make sure that committing the transaction will actually
4062  * get us somewhere and then commit the transaction if it does.  Otherwise it
4063  * will return -ENOSPC.
4064  */
4065 static int may_commit_transaction(struct btrfs_root *root,
4066                                   struct btrfs_space_info *space_info,
4067                                   u64 bytes, int force)
4068 {
4069         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4070         struct btrfs_trans_handle *trans;
4071
4072         trans = (struct btrfs_trans_handle *)current->journal_info;
4073         if (trans)
4074                 return -EAGAIN;
4075
4076         if (force)
4077                 goto commit;
4078
4079         /* See if there is enough pinned space to make this reservation */
4080         spin_lock(&space_info->lock);
4081         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4082                                    bytes) >= 0) {
4083                 spin_unlock(&space_info->lock);
4084                 goto commit;
4085         }
4086         spin_unlock(&space_info->lock);
4087
4088         /*
4089          * See if there is some space in the delayed insertion reservation for
4090          * this reservation.
4091          */
4092         if (space_info != delayed_rsv->space_info)
4093                 return -ENOSPC;
4094
4095         spin_lock(&space_info->lock);
4096         spin_lock(&delayed_rsv->lock);
4097         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4098                                    bytes - delayed_rsv->size) >= 0) {
4099                 spin_unlock(&delayed_rsv->lock);
4100                 spin_unlock(&space_info->lock);
4101                 return -ENOSPC;
4102         }
4103         spin_unlock(&delayed_rsv->lock);
4104         spin_unlock(&space_info->lock);
4105
4106 commit:
4107         trans = btrfs_join_transaction(root);
4108         if (IS_ERR(trans))
4109                 return -ENOSPC;
4110
4111         return btrfs_commit_transaction(trans, root);
4112 }
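
/*
 * A worked illustration of the checks above, with hypothetical numbers:
 * committing a transaction unpins everything pinned during it, so if we
 * need bytes = 8MiB and total_bytes_pinned is 10MiB, the commit is
 * worthwhile.  With only 5MiB pinned it can still help when the delayed
 * insertion rsv shares this space_info: a 4MiB delayed_rsv is released by
 * the commit as well, and 5MiB >= 8MiB - 4MiB, so we commit rather than
 * return -ENOSPC.
 */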
4113
4114 enum flush_state {
4115         FLUSH_DELAYED_ITEMS_NR  =       1,
4116         FLUSH_DELAYED_ITEMS     =       2,
4117         FLUSH_DELALLOC          =       3,
4118         FLUSH_DELALLOC_WAIT     =       4,
4119         ALLOC_CHUNK             =       5,
4120         COMMIT_TRANS            =       6,
4121 };
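
/*
 * These states are tried in ascending order by reserve_metadata_bytes(),
 * cheapest first: run a couple of delayed items, then all of them, then
 * kick off (and afterwards wait on) delalloc writeback, then allocate a
 * chunk, and only as a last resort commit the transaction.  A minimal
 * sketch of the escalation, where try_reservation() is a stand-in for the
 * real retry logic further below:
 *
 *	int state = FLUSH_DELAYED_ITEMS_NR;
 *
 *	do {
 *		flush_space(root, space_info, num_bytes, orig_bytes, state);
 *	} while (!try_reservation() && ++state <= COMMIT_TRANS);
 */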
4122
4123 static int flush_space(struct btrfs_root *root,
4124                        struct btrfs_space_info *space_info, u64 num_bytes,
4125                        u64 orig_bytes, int state)
4126 {
4127         struct btrfs_trans_handle *trans;
4128         int nr;
4129         int ret = 0;
4130
4131         switch (state) {
4132         case FLUSH_DELAYED_ITEMS_NR:
4133         case FLUSH_DELAYED_ITEMS:
4134                 if (state == FLUSH_DELAYED_ITEMS_NR) {
4135                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4136
4137                         nr = (int)div64_u64(num_bytes, bytes);
4138                         if (!nr)
4139                                 nr = 1;
4140                         nr *= 2;
4141                 } else {
4142                         nr = -1;
4143                 }
4144                 trans = btrfs_join_transaction(root);
4145                 if (IS_ERR(trans)) {
4146                         ret = PTR_ERR(trans);
4147                         break;
4148                 }
4149                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4150                 btrfs_end_transaction(trans, root);
4151                 break;
4152         case FLUSH_DELALLOC:
4153         case FLUSH_DELALLOC_WAIT:
4154                 shrink_delalloc(root, num_bytes, orig_bytes,
4155                                 state == FLUSH_DELALLOC_WAIT);
4156                 break;
4157         case ALLOC_CHUNK:
4158                 trans = btrfs_join_transaction(root);
4159                 if (IS_ERR(trans)) {
4160                         ret = PTR_ERR(trans);
4161                         break;
4162                 }
4163                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4164                                      btrfs_get_alloc_profile(root, 0),
4165                                      CHUNK_ALLOC_NO_FORCE);
4166                 btrfs_end_transaction(trans, root);
4167                 if (ret == -ENOSPC)
4168                         ret = 0;
4169                 break;
4170         case COMMIT_TRANS:
4171                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4172                 break;
4173         default:
4174                 ret = -ENOSPC;
4175                 break;
4176         }
4177
4178         return ret;
4179 }
4180 /**
4181  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4182  * @root - the root we're allocating for
4183  * @block_rsv - the block_rsv we're allocating for
4184  * @orig_bytes - the number of bytes we want
4185  * @flush - whether or not we can flush to make our reservation
4186  *
4187  * This will reserve orig_bytes number of bytes from the space info associated
4188  * with the block_rsv.  If there is not enough space it will make an attempt to
4189  * flush out space to make room.  It will do this by flushing delalloc if
4190  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4191  * then no attempts to regain reservations will be made and this will fail if
4192  * there is not enough space already.
4193  */
4194 static int reserve_metadata_bytes(struct btrfs_root *root,
4195                                   struct btrfs_block_rsv *block_rsv,
4196                                   u64 orig_bytes,
4197                                   enum btrfs_reserve_flush_enum flush)
4198 {
4199         struct btrfs_space_info *space_info = block_rsv->space_info;
4200         u64 used;
4201         u64 num_bytes = orig_bytes;
4202         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4203         int ret = 0;
4204         bool flushing = false;
4205
4206 again:
4207         ret = 0;
4208         spin_lock(&space_info->lock);
4209         /*
4210          * We only want to wait if somebody other than us is flushing and we
4211          * are actually allowed to flush all things.
4212          */
4213         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4214                space_info->flush) {
4215                 spin_unlock(&space_info->lock);
4216                 /*
4217                  * If we have a trans handle we can't wait because the flusher
4218                  * may have to commit the transaction, which would mean we would
4219                  * deadlock since we are waiting for the flusher to finish, but
4220                  * hold the current transaction open.
4221                  */
4222                 if (current->journal_info)
4223                         return -EAGAIN;
4224                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4225                 /* Must have been killed, return */
4226                 if (ret)
4227                         return -EINTR;
4228
4229                 spin_lock(&space_info->lock);
4230         }
4231
4232         ret = -ENOSPC;
4233         used = space_info->bytes_used + space_info->bytes_reserved +
4234                 space_info->bytes_pinned + space_info->bytes_readonly +
4235                 space_info->bytes_may_use;
4236
4237         /*
4238          * The idea here is that if we've not already over-reserved the block
4239          * group then we can go ahead and save our reservation first and then
4240          * start flushing if we need to.  Otherwise, if we've already
4241          * overcommitted, let's start flushing stuff first and then come back
4242          * and try to make our reservation.
4243          */
4244         if (used <= space_info->total_bytes) {
4245                 if (used + orig_bytes <= space_info->total_bytes) {
4246                         space_info->bytes_may_use += orig_bytes;
4247                         trace_btrfs_space_reservation(root->fs_info,
4248                                 "space_info", space_info->flags, orig_bytes, 1);
4249                         ret = 0;
4250                 } else {
4251                         /*
4252                          * OK, set num_bytes to orig_bytes since we aren't
4253                          * overcommitted; this way we only try to reclaim what
4254                          * we need.
4255                          */
4256                         num_bytes = orig_bytes;
4257                 }
4258         } else {
4259                 /*
4260                  * OK, we're overcommitted; set num_bytes to the overcommitted
4261                  * amount plus the amount of bytes that we need for this
4262                  * reservation.
4263                  */
4264                 num_bytes = used - space_info->total_bytes +
4265                         (orig_bytes * 2);
4266         }
4267
4268         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4269                 space_info->bytes_may_use += orig_bytes;
4270                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4271                                               space_info->flags, orig_bytes,
4272                                               1);
4273                 ret = 0;
4274         }
4275
4276         /*
4277          * Couldn't make our reservation; save our place so that while we're
4278          * trying to reclaim space we can actually use it instead of somebody
4279          * else stealing it from us.
4280          *
4281          * We make the other tasks wait for the flush only when we can flush
4282          * all things.
4283          */
4284         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4285                 flushing = true;
4286                 space_info->flush = 1;
4287         }
4288
4289         spin_unlock(&space_info->lock);
4290
4291         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4292                 goto out;
4293
4294         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4295                           flush_state);
4296         flush_state++;
4297
4298         /*
4299          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4300          * could happen.  So skip the delalloc flush states.
4301          */
4302         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4303             (flush_state == FLUSH_DELALLOC ||
4304              flush_state == FLUSH_DELALLOC_WAIT))
4305                 flush_state = ALLOC_CHUNK;
4306
4307         if (!ret)
4308                 goto again;
4309         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4310                  flush_state < COMMIT_TRANS)
4311                 goto again;
4312         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4313                  flush_state <= COMMIT_TRANS)
4314                 goto again;
4315
4316 out:
4317         if (ret == -ENOSPC &&
4318             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4319                 struct btrfs_block_rsv *global_rsv =
4320                         &root->fs_info->global_block_rsv;
4321
4322                 if (block_rsv != global_rsv &&
4323                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4324                         ret = 0;
4325         }
4326         if (flushing) {
4327                 spin_lock(&space_info->lock);
4328                 space_info->flush = 0;
4329                 wake_up_all(&space_info->wait);
4330                 spin_unlock(&space_info->lock);
4331         }
4332         return ret;
4333 }
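
/*
 * To make the reclaim target above concrete (hypothetical numbers): with
 * total_bytes = 100MiB, used = 104MiB and orig_bytes = 2MiB we are already
 * overcommitted, so num_bytes = 104MiB - 100MiB + 2 * 2MiB = 8MiB is handed
 * to flush_space() -- we try to claw back the whole overcommit plus twice
 * what we need, not just the 2MiB reservation itself.
 */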
4334
4335 static struct btrfs_block_rsv *get_block_rsv(
4336                                         const struct btrfs_trans_handle *trans,
4337                                         const struct btrfs_root *root)
4338 {
4339         struct btrfs_block_rsv *block_rsv = NULL;
4340
4341         if (root->ref_cows)
4342                 block_rsv = trans->block_rsv;
4343
4344         if (root == root->fs_info->csum_root && trans->adding_csums)
4345                 block_rsv = trans->block_rsv;
4346
4347         if (!block_rsv)
4348                 block_rsv = root->block_rsv;
4349
4350         if (!block_rsv)
4351                 block_rsv = &root->fs_info->empty_block_rsv;
4352
4353         return block_rsv;
4354 }
4355
4356 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4357                                u64 num_bytes)
4358 {
4359         int ret = -ENOSPC;
4360         spin_lock(&block_rsv->lock);
4361         if (block_rsv->reserved >= num_bytes) {
4362                 block_rsv->reserved -= num_bytes;
4363                 if (block_rsv->reserved < block_rsv->size)
4364                         block_rsv->full = 0;
4365                 ret = 0;
4366         }
4367         spin_unlock(&block_rsv->lock);
4368         return ret;
4369 }
4370
4371 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4372                                 u64 num_bytes, int update_size)
4373 {
4374         spin_lock(&block_rsv->lock);
4375         block_rsv->reserved += num_bytes;
4376         if (update_size)
4377                 block_rsv->size += num_bytes;
4378         else if (block_rsv->reserved >= block_rsv->size)
4379                 block_rsv->full = 1;
4380         spin_unlock(&block_rsv->lock);
4381 }
4382
4383 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4384                              struct btrfs_block_rsv *dest, u64 num_bytes,
4385                              int min_factor)
4386 {
4387         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4388         u64 min_bytes;
4389
4390         if (global_rsv->space_info != dest->space_info)
4391                 return -ENOSPC;
4392
4393         spin_lock(&global_rsv->lock);
4394         min_bytes = div_factor(global_rsv->size, min_factor);
4395         if (global_rsv->reserved < min_bytes + num_bytes) {
4396                 spin_unlock(&global_rsv->lock);
4397                 return -ENOSPC;
4398         }
4399         global_rsv->reserved -= num_bytes;
4400         if (global_rsv->reserved < global_rsv->size)
4401                 global_rsv->full = 0;
4402         spin_unlock(&global_rsv->lock);
4403
4404         block_rsv_add_bytes(dest, num_bytes, 1);
4405         return 0;
4406 }
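
/*
 * Example of the min_factor guard, assuming div_factor() scales by
 * factor / 10 as in btrfs's math.h: with a 512MiB global rsv and
 * min_factor = 5, min_bytes is 256MiB, so a hypothetical
 *
 *	btrfs_cond_migrate_bytes(fs_info, dest_rsv, 8 * 1024 * 1024, 5);
 *
 * only succeeds while global_rsv->reserved is at least 264MiB.  This keeps
 * the migration from draining the global reserve below half full.
 */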
4407
4408 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4409                                     struct btrfs_block_rsv *block_rsv,
4410                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4411 {
4412         struct btrfs_space_info *space_info = block_rsv->space_info;
4413
4414         spin_lock(&block_rsv->lock);
4415         if (num_bytes == (u64)-1)
4416                 num_bytes = block_rsv->size;
4417         block_rsv->size -= num_bytes;
4418         if (block_rsv->reserved >= block_rsv->size) {
4419                 num_bytes = block_rsv->reserved - block_rsv->size;
4420                 block_rsv->reserved = block_rsv->size;
4421                 block_rsv->full = 1;
4422         } else {
4423                 num_bytes = 0;
4424         }
4425         spin_unlock(&block_rsv->lock);
4426
4427         if (num_bytes > 0) {
4428                 if (dest) {
4429                         spin_lock(&dest->lock);
4430                         if (!dest->full) {
4431                                 u64 bytes_to_add;
4432
4433                                 bytes_to_add = dest->size - dest->reserved;
4434                                 bytes_to_add = min(num_bytes, bytes_to_add);
4435                                 dest->reserved += bytes_to_add;
4436                                 if (dest->reserved >= dest->size)
4437                                         dest->full = 1;
4438                                 num_bytes -= bytes_to_add;
4439                         }
4440                         spin_unlock(&dest->lock);
4441                 }
4442                 if (num_bytes) {
4443                         spin_lock(&space_info->lock);
4444                         space_info->bytes_may_use -= num_bytes;
4445                         trace_btrfs_space_reservation(fs_info, "space_info",
4446                                         space_info->flags, num_bytes, 0);
4447                         space_info->reservation_progress++;
4448                         spin_unlock(&space_info->lock);
4449                 }
4450         }
4451 }
4452
4453 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4454                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4455 {
4456         int ret;
4457
4458         ret = block_rsv_use_bytes(src, num_bytes);
4459         if (ret)
4460                 return ret;
4461
4462         block_rsv_add_bytes(dst, num_bytes, 1);
4463         return 0;
4464 }
4465
4466 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4467 {
4468         memset(rsv, 0, sizeof(*rsv));
4469         spin_lock_init(&rsv->lock);
4470         rsv->type = type;
4471 }
4472
4473 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4474                                               unsigned short type)
4475 {
4476         struct btrfs_block_rsv *block_rsv;
4477         struct btrfs_fs_info *fs_info = root->fs_info;
4478
4479         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4480         if (!block_rsv)
4481                 return NULL;
4482
4483         btrfs_init_block_rsv(block_rsv, type);
4484         block_rsv->space_info = __find_space_info(fs_info,
4485                                                   BTRFS_BLOCK_GROUP_METADATA);
4486         return block_rsv;
4487 }
4488
4489 void btrfs_free_block_rsv(struct btrfs_root *root,
4490                           struct btrfs_block_rsv *rsv)
4491 {
4492         if (!rsv)
4493                 return;
4494         btrfs_block_rsv_release(root, rsv, (u64)-1);
4495         kfree(rsv);
4496 }
4497
4498 int btrfs_block_rsv_add(struct btrfs_root *root,
4499                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4500                         enum btrfs_reserve_flush_enum flush)
4501 {
4502         int ret;
4503
4504         if (num_bytes == 0)
4505                 return 0;
4506
4507         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4508         if (!ret) {
4509                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4510                 return 0;
4511         }
4512
4513         return ret;
4514 }
4515
4516 int btrfs_block_rsv_check(struct btrfs_root *root,
4517                           struct btrfs_block_rsv *block_rsv, int min_factor)
4518 {
4519         u64 num_bytes = 0;
4520         int ret = -ENOSPC;
4521
4522         if (!block_rsv)
4523                 return 0;
4524
4525         spin_lock(&block_rsv->lock);
4526         num_bytes = div_factor(block_rsv->size, min_factor);
4527         if (block_rsv->reserved >= num_bytes)
4528                 ret = 0;
4529         spin_unlock(&block_rsv->lock);
4530
4531         return ret;
4532 }
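
/*
 * A sketch of a typical check, assuming the same factor / 10 scaling: a
 * rsv with size = 10MiB and min_factor = 5 passes while at least 5MiB is
 * still reserved, so a caller (with a min_reserved of its choosing) can do
 *
 *	if (btrfs_block_rsv_check(root, rsv, 5))
 *		ret = btrfs_block_rsv_refill(root, rsv, min_reserved,
 *					     BTRFS_RESERVE_FLUSH_ALL);
 */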
4533
4534 int btrfs_block_rsv_refill(struct btrfs_root *root,
4535                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4536                            enum btrfs_reserve_flush_enum flush)
4537 {
4538         u64 num_bytes = 0;
4539         int ret = -ENOSPC;
4540
4541         if (!block_rsv)
4542                 return 0;
4543
4544         spin_lock(&block_rsv->lock);
4545         num_bytes = min_reserved;
4546         if (block_rsv->reserved >= num_bytes)
4547                 ret = 0;
4548         else
4549                 num_bytes -= block_rsv->reserved;
4550         spin_unlock(&block_rsv->lock);
4551
4552         if (!ret)
4553                 return 0;
4554
4555         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4556         if (!ret) {
4557                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4558                 return 0;
4559         }
4560
4561         return ret;
4562 }
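
/*
 * Refill tops a rsv back up to a floor without growing its size (note
 * update_size = 0 in the call above).  With reserved = 3MiB and
 * min_reserved = 5MiB, only the missing 2MiB is passed down to
 * reserve_metadata_bytes(); a rsv already at or above min_reserved
 * returns 0 without reserving anything.
 */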
4563
4564 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4565                             struct btrfs_block_rsv *dst_rsv,
4566                             u64 num_bytes)
4567 {
4568         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4569 }
4570
4571 void btrfs_block_rsv_release(struct btrfs_root *root,
4572                              struct btrfs_block_rsv *block_rsv,
4573                              u64 num_bytes)
4574 {
4575         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4576         if (global_rsv->full || global_rsv == block_rsv ||
4577             block_rsv->space_info != global_rsv->space_info)
4578                 global_rsv = NULL;
4579         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4580                                 num_bytes);
4581 }
4582
4583 /*
4584  * Helper to calculate the size of the global block reservation.
4585  * The desired value is the sum of the space used by the extent tree,
4586  * checksum tree and root tree.
4587  */
4588 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4589 {
4590         struct btrfs_space_info *sinfo;
4591         u64 num_bytes;
4592         u64 meta_used;
4593         u64 data_used;
4594         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4595
4596         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4597         spin_lock(&sinfo->lock);
4598         data_used = sinfo->bytes_used;
4599         spin_unlock(&sinfo->lock);
4600
4601         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4602         spin_lock(&sinfo->lock);
4603         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4604                 data_used = 0;
4605         meta_used = sinfo->bytes_used;
4606         spin_unlock(&sinfo->lock);
4607
4608         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4609                     csum_size * 2;
4610         num_bytes += div64_u64(data_used + meta_used, 50);
4611
4612         if (num_bytes * 3 > meta_used)
4613                 num_bytes = div64_u64(meta_used, 3);
4614
4615         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4616 }
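
/*
 * Worked example with hypothetical usage, assuming the default 4-byte
 * crc32c csum: data_used = 4GiB of 4KiB blocks is 1Mi checksummed blocks,
 * i.e. 4MiB of csums, doubled to 8MiB for COW.  Adding 2% of
 * (data + metadata) with meta_used = 1GiB contributes another ~102MiB for
 * ~110MiB total; that is well under the meta_used / 3 cap, so the value is
 * simply rounded up to the leafsize << 10 alignment.
 */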
4617
4618 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4619 {
4620         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4621         struct btrfs_space_info *sinfo = block_rsv->space_info;
4622         u64 num_bytes;
4623
4624         num_bytes = calc_global_metadata_size(fs_info);
4625
4626         spin_lock(&sinfo->lock);
4627         spin_lock(&block_rsv->lock);
4628
4629         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4630
4631         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4632                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4633                     sinfo->bytes_may_use;
4634
4635         if (sinfo->total_bytes > num_bytes) {
4636                 num_bytes = sinfo->total_bytes - num_bytes;
4637                 block_rsv->reserved += num_bytes;
4638                 sinfo->bytes_may_use += num_bytes;
4639                 trace_btrfs_space_reservation(fs_info, "space_info",
4640                                       sinfo->flags, num_bytes, 1);
4641         }
4642
4643         if (block_rsv->reserved >= block_rsv->size) {
4644                 num_bytes = block_rsv->reserved - block_rsv->size;
4645                 sinfo->bytes_may_use -= num_bytes;
4646                 trace_btrfs_space_reservation(fs_info, "space_info",
4647                                       sinfo->flags, num_bytes, 0);
4648                 sinfo->reservation_progress++;
4649                 block_rsv->reserved = block_rsv->size;
4650                 block_rsv->full = 1;
4651         }
4652
4653         spin_unlock(&block_rsv->lock);
4654         spin_unlock(&sinfo->lock);
4655 }
4656
4657 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4658 {
4659         struct btrfs_space_info *space_info;
4660
4661         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4662         fs_info->chunk_block_rsv.space_info = space_info;
4663
4664         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4665         fs_info->global_block_rsv.space_info = space_info;
4666         fs_info->delalloc_block_rsv.space_info = space_info;
4667         fs_info->trans_block_rsv.space_info = space_info;
4668         fs_info->empty_block_rsv.space_info = space_info;
4669         fs_info->delayed_block_rsv.space_info = space_info;
4670
4671         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4672         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4673         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4674         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4675         if (fs_info->quota_root)
4676                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4677         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4678
4679         update_global_block_rsv(fs_info);
4680 }
4681
4682 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4683 {
4684         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4685                                 (u64)-1);
4686         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4687         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4688         WARN_ON(fs_info->trans_block_rsv.size > 0);
4689         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4690         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4691         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4692         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4693         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4694 }
4695
4696 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4697                                   struct btrfs_root *root)
4698 {
4699         if (!trans->block_rsv)
4700                 return;
4701
4702         if (!trans->bytes_reserved)
4703                 return;
4704
4705         trace_btrfs_space_reservation(root->fs_info, "transaction",
4706                                       trans->transid, trans->bytes_reserved, 0);
4707         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4708         trans->bytes_reserved = 0;
4709 }
4710
4711 /* Can only return 0 or -ENOSPC */
4712 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4713                                   struct inode *inode)
4714 {
4715         struct btrfs_root *root = BTRFS_I(inode)->root;
4716         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4717         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4718
4719         /*
4720          * We need to hold space in order to delete our orphan item once we've
4721          * added it, so this takes the reservation now so that we can release it
4722          * later, when we are truly done with the orphan item.
4723          */
4724         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4725         trace_btrfs_space_reservation(root->fs_info, "orphan",
4726                                       btrfs_ino(inode), num_bytes, 1);
4727         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4728 }
4729
4730 void btrfs_orphan_release_metadata(struct inode *inode)
4731 {
4732         struct btrfs_root *root = BTRFS_I(inode)->root;
4733         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4734         trace_btrfs_space_reservation(root->fs_info, "orphan",
4735                                       btrfs_ino(inode), num_bytes, 0);
4736         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4737 }
4738
4739 /*
4740  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4741  * root: the root of the parent directory
4742  * rsv: block reservation
4743  * items: the number of items that we need to reserve for
4744  * qgroup_reserved: used to return the reserved size in qgroup
4745  *
4746  * This function is used to reserve the space for snapshot/subvolume
4747  * creation and deletion. Those operations differ from the common
4748  * file/directory operations: they change two fs/file trees and the
4749  * root tree, and the number of items that the qgroup reserves differs
4750  * from the free space reservation. So we cannot use the space
4751  * reservation mechanism in start_transaction().
4752  */
4753 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4754                                      struct btrfs_block_rsv *rsv,
4755                                      int items,
4756                                      u64 *qgroup_reserved,
4757                                      bool use_global_rsv)
4758 {
4759         u64 num_bytes;
4760         int ret;
4761         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4762
4763         if (root->fs_info->quota_enabled) {
4764                 /* One for parent inode, two for dir entries */
4765                 num_bytes = 3 * root->leafsize;
4766                 ret = btrfs_qgroup_reserve(root, num_bytes);
4767                 if (ret)
4768                         return ret;
4769         } else {
4770                 num_bytes = 0;
4771         }
4772
4773         *qgroup_reserved = num_bytes;
4774
4775         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4776         rsv->space_info = __find_space_info(root->fs_info,
4777                                             BTRFS_BLOCK_GROUP_METADATA);
4778         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4779                                   BTRFS_RESERVE_FLUSH_ALL);
4780
4781         if (ret == -ENOSPC && use_global_rsv)
4782                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4783
4784         if (ret) {
4785                 if (*qgroup_reserved)
4786                         btrfs_qgroup_free(root, *qgroup_reserved);
4787         }
4788
4789         return ret;
4790 }
4791
4792 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4793                                       struct btrfs_block_rsv *rsv,
4794                                       u64 qgroup_reserved)
4795 {
4796         btrfs_block_rsv_release(root, rsv, (u64)-1);
4797         if (qgroup_reserved)
4798                 btrfs_qgroup_free(root, qgroup_reserved);
4799 }
4800
4801 /**
4802  * drop_outstanding_extent - drop an outstanding extent
4803  * @inode: the inode we're dropping the extent for
4804  *
4805  * This is called when we are freeing up an outstanding extent, either
4806  * after an error or after an extent is written.  This will return the number of
4807  * reserved extents that need to be freed.  This must be called with
4808  * BTRFS_I(inode)->lock held.
4809  */
4810 static unsigned drop_outstanding_extent(struct inode *inode)
4811 {
4812         unsigned drop_inode_space = 0;
4813         unsigned dropped_extents = 0;
4814
4815         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4816         BTRFS_I(inode)->outstanding_extents--;
4817
4818         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4819             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4820                                &BTRFS_I(inode)->runtime_flags))
4821                 drop_inode_space = 1;
4822
4823         /*
4824          * If we have at least as many outstanding extents as we have
4825          * reserved extents then we need to leave the reserved extents count alone.
4826          */
4827         if (BTRFS_I(inode)->outstanding_extents >=
4828             BTRFS_I(inode)->reserved_extents)
4829                 return drop_inode_space;
4830
4831         dropped_extents = BTRFS_I(inode)->reserved_extents -
4832                 BTRFS_I(inode)->outstanding_extents;
4833         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4834         return dropped_extents + drop_inode_space;
4835 }
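
/*
 * Example: an inode with outstanding_extents = 3 and reserved_extents = 4.
 * Dropping one extent leaves 2 outstanding, so dropped_extents becomes
 * 4 - 2 = 2 and reserved_extents is trimmed to 2.  A later drop that takes
 * outstanding_extents to zero additionally returns one for the inode
 * update reservation (drop_inode_space).
 */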
4836
4837 /**
4838  * calc_csum_metadata_size - return the amount of metadata space that must be
4839  *      reserved/free'd for the given bytes.
4840  * @inode: the inode we're manipulating
4841  * @num_bytes: the number of bytes in question
4842  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4843  *
4844  * This adjusts the number of csum_bytes in the inode and then returns the
4845  * correct amount of metadata that must either be reserved or freed.  We
4846  * calculate how many checksums we can fit into one leaf and then divide the
4847  * number of bytes that will need to be checksummed by this value to figure out
4848  * how many checksums will be required.  If we are adding bytes then the number
4849  * may go up and we will return the number of additional bytes that must be
4850  * reserved.  If it is going down we will return the number of bytes that must
4851  * be freed.
4852  *
4853  * This must be called with BTRFS_I(inode)->lock held.
4854  */
4855 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4856                                    int reserve)
4857 {
4858         struct btrfs_root *root = BTRFS_I(inode)->root;
4859         u64 csum_size;
4860         int num_csums_per_leaf;
4861         int num_csums;
4862         int old_csums;
4863
4864         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4865             BTRFS_I(inode)->csum_bytes == 0)
4866                 return 0;
4867
4868         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4869         if (reserve)
4870                 BTRFS_I(inode)->csum_bytes += num_bytes;
4871         else
4872                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4873         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4874         num_csums_per_leaf = (int)div64_u64(csum_size,
4875                                             sizeof(struct btrfs_csum_item) +
4876                                             sizeof(struct btrfs_disk_key));
4877         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4878         num_csums = num_csums + num_csums_per_leaf - 1;
4879         num_csums = num_csums / num_csums_per_leaf;
4880
4881         old_csums = old_csums + num_csums_per_leaf - 1;
4882         old_csums = old_csums / num_csums_per_leaf;
4883
4884         /* No change, no need to reserve more */
4885         if (old_csums == num_csums)
4886                 return 0;
4887
4888         if (reserve)
4889                 return btrfs_calc_trans_metadata_size(root,
4890                                                       num_csums - old_csums);
4891
4892         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4893 }
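
/*
 * Worked example, assuming 4KiB sectors and a hypothetical 100 csums per
 * leaf: growing csum_bytes from 800KiB (200 sectors -> 2 leaves) to
 * 1200KiB (300 sectors -> 3 leaves) crosses one leaf boundary, so a
 * reserve returns btrfs_calc_trans_metadata_size(root, 1).  Shrinking back
 * across the same boundary frees the same amount; staying within a leaf
 * returns 0.
 */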
4894
4895 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4896 {
4897         struct btrfs_root *root = BTRFS_I(inode)->root;
4898         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4899         u64 to_reserve = 0;
4900         u64 csum_bytes;
4901         unsigned nr_extents = 0;
4902         int extra_reserve = 0;
4903         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4904         int ret = 0;
4905         bool delalloc_lock = true;
4906         u64 to_free = 0;
4907         unsigned dropped;
4908
4909         /* If we are a free space inode we must not flush, since we will be in
4910          * the middle of a transaction commit.  We also don't need the delalloc
4911          * mutex since we won't race with anybody.  We need this mostly to make
4912          * lockdep shut its filthy mouth.
4913          */
4914         if (btrfs_is_free_space_inode(inode)) {
4915                 flush = BTRFS_RESERVE_NO_FLUSH;
4916                 delalloc_lock = false;
4917         }
4918
4919         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4920             btrfs_transaction_in_commit(root->fs_info))
4921                 schedule_timeout(1);
4922
4923         if (delalloc_lock)
4924                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4925
4926         num_bytes = ALIGN(num_bytes, root->sectorsize);
4927
4928         spin_lock(&BTRFS_I(inode)->lock);
4929         BTRFS_I(inode)->outstanding_extents++;
4930
4931         if (BTRFS_I(inode)->outstanding_extents >
4932             BTRFS_I(inode)->reserved_extents)
4933                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4934                         BTRFS_I(inode)->reserved_extents;
4935
4936         /*
4937          * Add an item to reserve for updating the inode when we complete the
4938          * delalloc io.
4939          */
4940         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4941                       &BTRFS_I(inode)->runtime_flags)) {
4942                 nr_extents++;
4943                 extra_reserve = 1;
4944         }
4945
4946         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4947         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4948         csum_bytes = BTRFS_I(inode)->csum_bytes;
4949         spin_unlock(&BTRFS_I(inode)->lock);
4950
4951         if (root->fs_info->quota_enabled) {
4952                 ret = btrfs_qgroup_reserve(root, num_bytes +
4953                                            nr_extents * root->leafsize);
4954                 if (ret)
4955                         goto out_fail;
4956         }
4957
4958         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4959         if (unlikely(ret)) {
4960                 if (root->fs_info->quota_enabled)
4961                         btrfs_qgroup_free(root, num_bytes +
4962                                                 nr_extents * root->leafsize);
4963                 goto out_fail;
4964         }
4965
4966         spin_lock(&BTRFS_I(inode)->lock);
4967         if (extra_reserve) {
4968                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4969                         &BTRFS_I(inode)->runtime_flags);
4970                 nr_extents--;
4971         }
4972         BTRFS_I(inode)->reserved_extents += nr_extents;
4973         spin_unlock(&BTRFS_I(inode)->lock);
4974
4975         if (delalloc_lock)
4976                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4977
4978         if (to_reserve)
4979                 trace_btrfs_space_reservation(root->fs_info,"delalloc",
4980                                               btrfs_ino(inode), to_reserve, 1);
4981         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4982
4983         return 0;
4984
4985 out_fail:
4986         spin_lock(&BTRFS_I(inode)->lock);
4987         dropped = drop_outstanding_extent(inode);
4988         /*
4989          * If the inode's csum_bytes is the same as the original
4990          * csum_bytes then we know we haven't raced with any free()ers,
4991          * so we can just reduce our inode's csum bytes and carry on.
4992          */
4993         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
4994                 calc_csum_metadata_size(inode, num_bytes, 0);
4995         } else {
4996                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
4997                 u64 bytes;
4998
4999                 /*
5000                  * This is tricky, but first we need to figure out how much we
5001                  * free'd from any free-ers that occurred during this
5002                  * reservation, so we reset ->csum_bytes to the csum_bytes
5003                  * before we dropped our lock, and then call the free for the
5004                  * number of bytes that were freed while we were trying our
5005                  * reservation.
5006                  */
5007                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5008                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5009                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5010
5011
5012                 /*
5013                  * Now we need to see how much we would have freed had we not
5014                  * been making this reservation and our ->csum_bytes were not
5015                  * artificially inflated.
5016                  */
5017                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5018                 bytes = csum_bytes - orig_csum_bytes;
5019                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5020
5021                 /*
5022                  * Now reset ->csum_bytes to what it should be.  If bytes is
5023                  * more than to_free then we would have free'd more space had we
5024                  * not had an artificially high ->csum_bytes, so we need to free
5025                  * the remainder.  If bytes is the same or less then we don't
5026                  * need to do anything, the other free-ers did the correct
5027                  * thing.
5028                  */
5029                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5030                 if (bytes > to_free)
5031                         to_free = bytes - to_free;
5032                 else
5033                         to_free = 0;
5034         }
5035         spin_unlock(&BTRFS_I(inode)->lock);
5036         if (dropped)
5037                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5038
5039         if (to_free) {
5040                 btrfs_block_rsv_release(root, block_rsv, to_free);
5041                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5042                                               btrfs_ino(inode), to_free, 0);
5043         }
5044         if (delalloc_lock)
5045                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5046         return ret;
5047 }
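
/*
 * The out_fail reconciliation above, with hypothetical numbers (4KiB
 * sectors, 100 csums per leaf): suppose csum_bytes was snapshotted at
 * 780KiB (195 sectors, 2 leaves) including our num_bytes = 240KiB, and
 * racing free()ers dropped ->csum_bytes to 580KiB (145 sectors) while we
 * blocked.  Measured against the inflated count, their 200KiB of frees
 * crossed no leaf boundary (to_free = 0); without our inflation the same
 * frees would have gone from 540KiB (135 sectors, 2 leaves) to 340KiB
 * (85 sectors, 1 leaf), so bytes works out to one leaf's worth of metadata
 * and we free bytes - to_free on their behalf, leaving ->csum_bytes at
 * 580KiB - 240KiB = 340KiB.
 */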
5048
5049 /**
5050  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5051  * @inode: the inode to release the reservation for
5052  * @num_bytes: the number of bytes we're releasing
5053  *
5054  * This will release the metadata reservation for an inode.  This can be called
5055  * once we complete IO for a given set of bytes to release their metadata
5056  * reservations.
5057  */
5058 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5059 {
5060         struct btrfs_root *root = BTRFS_I(inode)->root;
5061         u64 to_free = 0;
5062         unsigned dropped;
5063
5064         num_bytes = ALIGN(num_bytes, root->sectorsize);
5065         spin_lock(&BTRFS_I(inode)->lock);
5066         dropped = drop_outstanding_extent(inode);
5067
5068         if (num_bytes)
5069                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5070         spin_unlock(&BTRFS_I(inode)->lock);
5071         if (dropped > 0)
5072                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5073
5074         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5075                                       btrfs_ino(inode), to_free, 0);
5076         if (root->fs_info->quota_enabled) {
5077                 btrfs_qgroup_free(root, num_bytes +
5078                                         dropped * root->leafsize);
5079         }
5080
5081         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5082                                 to_free);
5083 }
5084
5085 /**
5086  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5087  * @inode: inode we're writing to
5088  * @num_bytes: the number of bytes we want to allocate
5089  *
5090  * This will do the following things
5091  *
5092  * o reserve space in the data space info for num_bytes
5093  * o reserve space in the metadata space info based on number of outstanding
5094  *   extents and how many csums will be needed
5095  * o add to the inode's ->delalloc_bytes
5096  * o add it to the fs_info's delalloc inodes list.
5097  *
5098  * This will return 0 for success and -ENOSPC if there is no space left.
5099  */
5100 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5101 {
5102         int ret;
5103
5104         ret = btrfs_check_data_free_space(inode, num_bytes);
5105         if (ret)
5106                 return ret;
5107
5108         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5109         if (ret) {
5110                 btrfs_free_reserved_data_space(inode, num_bytes);
5111                 return ret;
5112         }
5113
5114         return 0;
5115 }
5116
5117 /**
5118  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5119  * @inode: inode we're releasing space for
5120  * @num_bytes: the number of bytes we want to free up
5121  *
5122  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5123  * called in the case that we don't need the metadata AND data reservations
5124  * anymore, e.g. if there is an error or we insert an inline extent.
5125  *
5126  * This function will release the metadata space that was not used and will
5127  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5128  * list if there are no delalloc bytes left.
5129  */
5130 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5131 {
5132         btrfs_delalloc_release_metadata(inode, num_bytes);
5133         btrfs_free_reserved_data_space(inode, num_bytes);
5134 }
5135
5136 static int update_block_group(struct btrfs_root *root,
5137                               u64 bytenr, u64 num_bytes, int alloc)
5138 {
5139         struct btrfs_block_group_cache *cache = NULL;
5140         struct btrfs_fs_info *info = root->fs_info;
5141         u64 total = num_bytes;
5142         u64 old_val;
5143         u64 byte_in_group;
5144         int factor;
5145
5146         /* block accounting for super block */
5147         spin_lock(&info->delalloc_root_lock);
5148         old_val = btrfs_super_bytes_used(info->super_copy);
5149         if (alloc)
5150                 old_val += num_bytes;
5151         else
5152                 old_val -= num_bytes;
5153         btrfs_set_super_bytes_used(info->super_copy, old_val);
5154         spin_unlock(&info->delalloc_root_lock);
5155
5156         while (total) {
5157                 cache = btrfs_lookup_block_group(info, bytenr);
5158                 if (!cache)
5159                         return -ENOENT;
5160                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5161                                     BTRFS_BLOCK_GROUP_RAID1 |
5162                                     BTRFS_BLOCK_GROUP_RAID10))
5163                         factor = 2;
5164                 else
5165                         factor = 1;
5166                 /*
5167                  * If this block group has free space cache written out, we
5168                  * need to make sure to load it if we are removing space.  This
5169                  * is because we need the unpinning stage to actually add the
5170                  * space back to the block group, otherwise we will leak space.
5171                  */
5172                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5173                         cache_block_group(cache, 1);
5174
5175                 byte_in_group = bytenr - cache->key.objectid;
5176                 WARN_ON(byte_in_group > cache->key.offset);
5177
5178                 spin_lock(&cache->space_info->lock);
5179                 spin_lock(&cache->lock);
5180
5181                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5182                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5183                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5184
5185                 cache->dirty = 1;
5186                 old_val = btrfs_block_group_used(&cache->item);
5187                 num_bytes = min(total, cache->key.offset - byte_in_group);
5188                 if (alloc) {
5189                         old_val += num_bytes;
5190                         btrfs_set_block_group_used(&cache->item, old_val);
5191                         cache->reserved -= num_bytes;
5192                         cache->space_info->bytes_reserved -= num_bytes;
5193                         cache->space_info->bytes_used += num_bytes;
5194                         cache->space_info->disk_used += num_bytes * factor;
5195                         spin_unlock(&cache->lock);
5196                         spin_unlock(&cache->space_info->lock);
5197                 } else {
5198                         old_val -= num_bytes;
5199                         btrfs_set_block_group_used(&cache->item, old_val);
5200                         cache->pinned += num_bytes;
5201                         cache->space_info->bytes_pinned += num_bytes;
5202                         cache->space_info->bytes_used -= num_bytes;
5203                         cache->space_info->disk_used -= num_bytes * factor;
5204                         spin_unlock(&cache->lock);
5205                         spin_unlock(&cache->space_info->lock);
5206
5207                         set_extent_dirty(info->pinned_extents,
5208                                          bytenr, bytenr + num_bytes - 1,
5209                                          GFP_NOFS | __GFP_NOFAIL);
5210                 }
5211                 btrfs_put_block_group(cache);
5212                 total -= num_bytes;
5213                 bytenr += num_bytes;
5214         }
5215         return 0;
5216 }
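
/*
 * The factor above accounts for mirrored profiles: allocating 1MiB out of
 * a RAID1 (or DUP/RAID10) block group moves 1MiB from reserved to used but
 * bumps space_info->disk_used by 2MiB, since the data occupies two copies
 * on disk.  Single and striped profiles use factor = 1.
 */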
5217
5218 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5219 {
5220         struct btrfs_block_group_cache *cache;
5221         u64 bytenr;
5222
5223         spin_lock(&root->fs_info->block_group_cache_lock);
5224         bytenr = root->fs_info->first_logical_byte;
5225         spin_unlock(&root->fs_info->block_group_cache_lock);
5226
5227         if (bytenr < (u64)-1)
5228                 return bytenr;
5229
5230         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5231         if (!cache)
5232                 return 0;
5233
5234         bytenr = cache->key.objectid;
5235         btrfs_put_block_group(cache);
5236
5237         return bytenr;
5238 }
5239
5240 static int pin_down_extent(struct btrfs_root *root,
5241                            struct btrfs_block_group_cache *cache,
5242                            u64 bytenr, u64 num_bytes, int reserved)
5243 {
5244         spin_lock(&cache->space_info->lock);
5245         spin_lock(&cache->lock);
5246         cache->pinned += num_bytes;
5247         cache->space_info->bytes_pinned += num_bytes;
5248         if (reserved) {
5249                 cache->reserved -= num_bytes;
5250                 cache->space_info->bytes_reserved -= num_bytes;
5251         }
5252         spin_unlock(&cache->lock);
5253         spin_unlock(&cache->space_info->lock);
5254
5255         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5256                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5257         return 0;
5258 }
5259
5260 /*
5261  * this function must be called within a transaction
5262  */
5263 int btrfs_pin_extent(struct btrfs_root *root,
5264                      u64 bytenr, u64 num_bytes, int reserved)
5265 {
5266         struct btrfs_block_group_cache *cache;
5267
5268         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5269         BUG_ON(!cache); /* Logic error */
5270
5271         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5272
5273         btrfs_put_block_group(cache);
5274         return 0;
5275 }
5276
5277 /*
5278  * this function must be called within a transaction
5279  */
5280 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5281                                     u64 bytenr, u64 num_bytes)
5282 {
5283         struct btrfs_block_group_cache *cache;
5284         int ret;
5285
5286         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5287         if (!cache)
5288                 return -EINVAL;
5289
5290         /*
5291          * pull in the free space cache (if any) so that our pin
5292          * removes the free space from the cache.  We have load_only set
5293          * to one because the slow code to read in the free extents does check
5294          * the pinned extents.
5295          */
5296         cache_block_group(cache, 1);
5297
5298         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5299
5300         /* remove us from the free space cache (if we're there at all) */
5301         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5302         btrfs_put_block_group(cache);
5303         return ret;
5304 }
5305
5306 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5307 {
5308         int ret;
5309         struct btrfs_block_group_cache *block_group;
5310         struct btrfs_caching_control *caching_ctl;
5311
5312         block_group = btrfs_lookup_block_group(root->fs_info, start);
5313         if (!block_group)
5314                 return -EINVAL;
5315
5316         cache_block_group(block_group, 0);
5317         caching_ctl = get_caching_control(block_group);
5318
5319         if (!caching_ctl) {
5320                 /* Logic error */
5321                 BUG_ON(!block_group_cache_done(block_group));
5322                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5323         } else {
5324                 mutex_lock(&caching_ctl->mutex);
5325
5326                 if (start >= caching_ctl->progress) {
5327                         ret = add_excluded_extent(root, start, num_bytes);
5328                 } else if (start + num_bytes <= caching_ctl->progress) {
5329                         ret = btrfs_remove_free_space(block_group,
5330                                                       start, num_bytes);
5331                 } else {
5332                         num_bytes = caching_ctl->progress - start;
5333                         ret = btrfs_remove_free_space(block_group,
5334                                                       start, num_bytes);
5335                         if (ret)
5336                                 goto out_lock;
5337
5338                         num_bytes = (start + num_bytes) -
5339                                 caching_ctl->progress;
5340                         start = caching_ctl->progress;
5341                         ret = add_excluded_extent(root, start, num_bytes);
5342                 }
5343 out_lock:
5344                 mutex_unlock(&caching_ctl->mutex);
5345                 put_caching_control(caching_ctl);
5346         }
5347         btrfs_put_block_group(block_group);
5348         return ret;
5349 }
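
/*
 * Example of the straddling case above: with caching progress at 1GiB, a
 * logged extent [1GiB - 4KiB, 1GiB + 8KiB) is split -- the first 4KiB is
 * already in the free space cache and gets removed from it, while the 8KiB
 * past the progress pointer is recorded as an excluded extent so the
 * caching thread will skip it.
 */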
5350
5351 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5352                                  struct extent_buffer *eb)
5353 {
5354         struct btrfs_file_extent_item *item;
5355         struct btrfs_key key;
5356         int found_type;
5357         int i;
5358
5359         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5360                 return 0;
5361
5362         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5363                 btrfs_item_key_to_cpu(eb, &key, i);
5364                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5365                         continue;
5366                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5367                 found_type = btrfs_file_extent_type(eb, item);
5368                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5369                         continue;
5370                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5371                         continue;
5372                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5373                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5374                 __exclude_logged_extent(log, key.objectid, key.offset);
5375         }
5376
5377         return 0;
5378 }
5379
5380 /**
5381  * btrfs_update_reserved_bytes - update the block_group and space info counters
5382  * @cache:      The cache we are manipulating
5383  * @num_bytes:  The number of bytes in question
5384  * @reserve:    One of the reservation enums
5385  *
5386  * This is called by the allocator when it reserves space, or by somebody who is
5387  * freeing space that was never actually used on disk.  For example if you
5388  * reserve some space for a new leaf in transaction A and before transaction A
5389  * commits you free that leaf, you call this with reserve set to 0 in order to
5390  * clear the reservation.
5391  *
5392  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5393  * ENOSPC accounting.  For data we handle the reservation through clearing the
5394  * delalloc bits in the io_tree.  We have to do this since we could end up
5395  * allocating less disk space for the amount of data we have reserved in the
5396  * case of compression.
5397  *
5398  * If this is a reservation and the block group has become read only we cannot
5399  * make the reservation and return -EAGAIN, otherwise this function always
5400  * succeeds.
5401  */
5402 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5403                                        u64 num_bytes, int reserve)
5404 {
5405         struct btrfs_space_info *space_info = cache->space_info;
5406         int ret = 0;
5407
5408         spin_lock(&space_info->lock);
5409         spin_lock(&cache->lock);
5410         if (reserve != RESERVE_FREE) {
5411                 if (cache->ro) {
5412                         ret = -EAGAIN;
5413                 } else {
5414                         cache->reserved += num_bytes;
5415                         space_info->bytes_reserved += num_bytes;
5416                         if (reserve == RESERVE_ALLOC) {
5417                                 trace_btrfs_space_reservation(cache->fs_info,
5418                                                 "space_info", space_info->flags,
5419                                                 num_bytes, 0);
5420                                 space_info->bytes_may_use -= num_bytes;
5421                         }
5422                 }
5423         } else {
5424                 if (cache->ro)
5425                         space_info->bytes_readonly += num_bytes;
5426                 cache->reserved -= num_bytes;
5427                 space_info->bytes_reserved -= num_bytes;
5428                 space_info->reservation_progress++;
5429         }
5430         spin_unlock(&cache->lock);
5431         spin_unlock(&space_info->lock);
5432         return ret;
5433 }
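
/*
 * A minimal sketch of the reservation life cycle (illustrative only,
 * not a real call site):
 *
 *	ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC);
 *	if (ret == -EAGAIN) {
 *		... the block group went read only, try another one ...
 *	}
 *	...
 *	... the allocation is abandoned before it ever hits disk ...
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
 */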
5434
5435 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5436                                 struct btrfs_root *root)
5437 {
5438         struct btrfs_fs_info *fs_info = root->fs_info;
5439         struct btrfs_caching_control *next;
5440         struct btrfs_caching_control *caching_ctl;
5441         struct btrfs_block_group_cache *cache;
5442         struct btrfs_space_info *space_info;
5443
5444         down_write(&fs_info->extent_commit_sem);
5445
5446         list_for_each_entry_safe(caching_ctl, next,
5447                                  &fs_info->caching_block_groups, list) {
5448                 cache = caching_ctl->block_group;
5449                 if (block_group_cache_done(cache)) {
5450                         cache->last_byte_to_unpin = (u64)-1;
5451                         list_del_init(&caching_ctl->list);
5452                         put_caching_control(caching_ctl);
5453                 } else {
5454                         cache->last_byte_to_unpin = caching_ctl->progress;
5455                 }
5456         }
5457
5458         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5459                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5460         else
5461                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5462
5463         up_write(&fs_info->extent_commit_sem);
5464
5465         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5466                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5467
5468         update_global_block_rsv(fs_info);
5469 }
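
/*
 * Note that fs_info->freed_extents[] is a pair of extent io trees that
 * the code above flips between on every commit: one collects the
 * extents pinned by the next transaction while
 * btrfs_finish_extent_commit() below drains the other, which holds the
 * extents pinned by the transaction that just committed.
 */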
5470
5471 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5472 {
5473         struct btrfs_fs_info *fs_info = root->fs_info;
5474         struct btrfs_block_group_cache *cache = NULL;
5475         struct btrfs_space_info *space_info;
5476         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5477         u64 len;
5478         bool readonly;
5479
5480         while (start <= end) {
5481                 readonly = false;
5482                 if (!cache ||
5483                     start >= cache->key.objectid + cache->key.offset) {
5484                         if (cache)
5485                                 btrfs_put_block_group(cache);
5486                         cache = btrfs_lookup_block_group(fs_info, start);
5487                         BUG_ON(!cache); /* Logic error */
5488                 }
5489
5490                 len = cache->key.objectid + cache->key.offset - start;
5491                 len = min(len, end + 1 - start);
5492
5493                 if (start < cache->last_byte_to_unpin) {
5494                         len = min(len, cache->last_byte_to_unpin - start);
5495                         btrfs_add_free_space(cache, start, len);
5496                 }
5497
5498                 start += len;
5499                 space_info = cache->space_info;
5500
5501                 spin_lock(&space_info->lock);
5502                 spin_lock(&cache->lock);
5503                 cache->pinned -= len;
5504                 space_info->bytes_pinned -= len;
5505                 if (cache->ro) {
5506                         space_info->bytes_readonly += len;
5507                         readonly = true;
5508                 }
5509                 spin_unlock(&cache->lock);
5510                 if (!readonly && global_rsv->space_info == space_info) {
5511                         spin_lock(&global_rsv->lock);
5512                         if (!global_rsv->full) {
5513                                 len = min(len, global_rsv->size -
5514                                           global_rsv->reserved);
5515                                 global_rsv->reserved += len;
5516                                 space_info->bytes_may_use += len;
5517                                 if (global_rsv->reserved >= global_rsv->size)
5518                                         global_rsv->full = 1;
5519                         }
5520                         spin_unlock(&global_rsv->lock);
5521                 }
5522                 spin_unlock(&space_info->lock);
5523         }
5524
5525         if (cache)
5526                 btrfs_put_block_group(cache);
5527         return 0;
5528 }
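
/*
 * The global block reserve is topped up directly from unpinned space in
 * the loop above: if, say, the reserve is 1M short of its target size
 * and we unpin 4M of metadata, only that 1M is diverted into the
 * reserve (and counted in bytes_may_use); the remainder becomes plain
 * free space.
 */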
5529
5530 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5531                                struct btrfs_root *root)
5532 {
5533         struct btrfs_fs_info *fs_info = root->fs_info;
5534         struct extent_io_tree *unpin;
5535         u64 start;
5536         u64 end;
5537         int ret;
5538
5539         if (trans->aborted)
5540                 return 0;
5541
5542         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5543                 unpin = &fs_info->freed_extents[1];
5544         else
5545                 unpin = &fs_info->freed_extents[0];
5546
5547         while (1) {
5548                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5549                                             EXTENT_DIRTY, NULL);
5550                 if (ret)
5551                         break;
5552
5553                 if (btrfs_test_opt(root, DISCARD))
5554                         ret = btrfs_discard_extent(root, start,
5555                                                    end + 1 - start, NULL);
5556
5557                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5558                 unpin_extent_range(root, start, end);
5559                 cond_resched();
5560         }
5561
5562         return 0;
5563 }
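
/*
 * Note that a failure from btrfs_discard_extent() above is not
 * propagated: the range is still cleared and unpinned, since a missed
 * discard costs only an unissued trim, not correctness.
 */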
5564
5565 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5566                              u64 owner, u64 root_objectid)
5567 {
5568         struct btrfs_space_info *space_info;
5569         u64 flags;
5570
5571         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5572                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5573                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5574                 else
5575                         flags = BTRFS_BLOCK_GROUP_METADATA;
5576         } else {
5577                 flags = BTRFS_BLOCK_GROUP_DATA;
5578         }
5579
5580         space_info = __find_space_info(fs_info, flags);
5581         BUG_ON(!space_info); /* Logic bug */
5582         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5583 }
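
/*
 * num_bytes is u64, but callers subtract by passing a negated value
 * (see __btrfs_free_extent() below): the wrapped u64 is reinterpreted
 * as a negative s64 by percpu_counter_add().
 */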
5584
5585
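/*
 * Drop refs_to_drop references to the extent at bytenr.  If other
 * references remain, the extent item's ref count is decremented (for
 * inline back refs remove_extent_backref() adjusts it); when the last
 * reference goes away the extent item itself is deleted, checksums are
 * dropped for data extents, and the block group counters are updated
 * via update_block_group().
 */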
5586 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5587                                 struct btrfs_root *root,
5588                                 u64 bytenr, u64 num_bytes, u64 parent,
5589                                 u64 root_objectid, u64 owner_objectid,
5590                                 u64 owner_offset, int refs_to_drop,
5591                                 struct btrfs_delayed_extent_op *extent_op)
5592 {
5593         struct btrfs_key key;
5594         struct btrfs_path *path;
5595         struct btrfs_fs_info *info = root->fs_info;
5596         struct btrfs_root *extent_root = info->extent_root;
5597         struct extent_buffer *leaf;
5598         struct btrfs_extent_item *ei;
5599         struct btrfs_extent_inline_ref *iref;
5600         int ret;
5601         int is_data;
5602         int extent_slot = 0;
5603         int found_extent = 0;
5604         int num_to_del = 1;
5605         u32 item_size;
5606         u64 refs;
5607         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5608                                                  SKINNY_METADATA);
5609
5610         path = btrfs_alloc_path();
5611         if (!path)
5612                 return -ENOMEM;
5613
5614         path->reada = 1;
5615         path->leave_spinning = 1;
5616
5617         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5618         BUG_ON(!is_data && refs_to_drop != 1);
5619
5620         if (is_data)
5621                 skinny_metadata = 0;
5622
5623         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5624                                     bytenr, num_bytes, parent,
5625                                     root_objectid, owner_objectid,
5626                                     owner_offset);
5627         if (ret == 0) {
5628                 extent_slot = path->slots[0];
5629                 while (extent_slot >= 0) {
5630                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5631                                               extent_slot);
5632                         if (key.objectid != bytenr)
5633                                 break;
5634                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5635                             key.offset == num_bytes) {
5636                                 found_extent = 1;
5637                                 break;
5638                         }
5639                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5640                             key.offset == owner_objectid) {
5641                                 found_extent = 1;
5642                                 break;
5643                         }
5644                         if (path->slots[0] - extent_slot > 5)
5645                                 break;
5646                         extent_slot--;
5647                 }
5648 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5649                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5650                 if (found_extent && item_size < sizeof(*ei))
5651                         found_extent = 0;
5652 #endif
5653                 if (!found_extent) {
5654                         BUG_ON(iref);
5655                         ret = remove_extent_backref(trans, extent_root, path,
5656                                                     NULL, refs_to_drop,
5657                                                     is_data);
5658                         if (ret) {
5659                                 btrfs_abort_transaction(trans, extent_root, ret);
5660                                 goto out;
5661                         }
5662                         btrfs_release_path(path);
5663                         path->leave_spinning = 1;
5664
5665                         key.objectid = bytenr;
5666                         key.type = BTRFS_EXTENT_ITEM_KEY;
5667                         key.offset = num_bytes;
5668
5669                         if (!is_data && skinny_metadata) {
5670                                 key.type = BTRFS_METADATA_ITEM_KEY;
5671                                 key.offset = owner_objectid;
5672                         }
5673
5674                         ret = btrfs_search_slot(trans, extent_root,
5675                                                 &key, path, -1, 1);
5676                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5677                                 /*
5678                                  * Couldn't find our skinny metadata item,
5679                                  * see if we have ye olde extent item.
5680                                  */
5681                                 path->slots[0]--;
5682                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5683                                                       path->slots[0]);
5684                                 if (key.objectid == bytenr &&
5685                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5686                                     key.offset == num_bytes)
5687                                         ret = 0;
5688                         }
5689
5690                         if (ret > 0 && skinny_metadata) {
5691                                 skinny_metadata = false;
5692                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5693                                 key.offset = num_bytes;
5694                                 btrfs_release_path(path);
5695                                 ret = btrfs_search_slot(trans, extent_root,
5696                                                         &key, path, -1, 1);
5697                         }
5698
5699                         if (ret) {
5700                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5701                                         ret, (unsigned long long)bytenr);
5702                                 if (ret > 0)
5703                                         btrfs_print_leaf(extent_root,
5704                                                          path->nodes[0]);
5705                         }
5706                         if (ret < 0) {
5707                                 btrfs_abort_transaction(trans, extent_root, ret);
5708                                 goto out;
5709                         }
5710                         extent_slot = path->slots[0];
5711                 }
5712         } else if (ret == -ENOENT) {
5713                 btrfs_print_leaf(extent_root, path->nodes[0]);
5714                 WARN_ON(1);
5715                 btrfs_err(info,
5716                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5717                         (unsigned long long)bytenr,
5718                         (unsigned long long)parent,
5719                         (unsigned long long)root_objectid,
5720                         (unsigned long long)owner_objectid,
5721                         (unsigned long long)owner_offset);
5722         } else {
5723                 btrfs_abort_transaction(trans, extent_root, ret);
5724                 goto out;
5725         }
5726
5727         leaf = path->nodes[0];
5728         item_size = btrfs_item_size_nr(leaf, extent_slot);
5729 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5730         if (item_size < sizeof(*ei)) {
5731                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5732                 ret = convert_extent_item_v0(trans, extent_root, path,
5733                                              owner_objectid, 0);
5734                 if (ret < 0) {
5735                         btrfs_abort_transaction(trans, extent_root, ret);
5736                         goto out;
5737                 }
5738
5739                 btrfs_release_path(path);
5740                 path->leave_spinning = 1;
5741
5742                 key.objectid = bytenr;
5743                 key.type = BTRFS_EXTENT_ITEM_KEY;
5744                 key.offset = num_bytes;
5745
5746                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5747                                         -1, 1);
5748                 if (ret) {
5749                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5750                                 ret, (unsigned long long)bytenr);
5751                         btrfs_print_leaf(extent_root, path->nodes[0]);
5752                 }
5753                 if (ret < 0) {
5754                         btrfs_abort_transaction(trans, extent_root, ret);
5755                         goto out;
5756                 }
5757
5758                 extent_slot = path->slots[0];
5759                 leaf = path->nodes[0];
5760                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5761         }
5762 #endif
5763         BUG_ON(item_size < sizeof(*ei));
5764         ei = btrfs_item_ptr(leaf, extent_slot,
5765                             struct btrfs_extent_item);
5766         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5767             key.type == BTRFS_EXTENT_ITEM_KEY) {
5768                 struct btrfs_tree_block_info *bi;
5769                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5770                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5771                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5772         }
5773
5774         refs = btrfs_extent_refs(leaf, ei);
5775         if (refs < refs_to_drop) {
5776                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5777                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
5778                 ret = -EINVAL;
5779                 btrfs_abort_transaction(trans, extent_root, ret);
5780                 goto out;
5781         }
5782         refs -= refs_to_drop;
5783
5784         if (refs > 0) {
5785                 if (extent_op)
5786                         __run_delayed_extent_op(extent_op, leaf, ei);
5787                 /*
5788                  * In the case of an inline back ref, the reference count
5789                  * will be updated by remove_extent_backref()
5790                  */
5791                 if (iref) {
5792                         BUG_ON(!found_extent);
5793                 } else {
5794                         btrfs_set_extent_refs(leaf, ei, refs);
5795                         btrfs_mark_buffer_dirty(leaf);
5796                 }
5797                 if (found_extent) {
5798                         ret = remove_extent_backref(trans, extent_root, path,
5799                                                     iref, refs_to_drop,
5800                                                     is_data);
5801                         if (ret) {
5802                                 btrfs_abort_transaction(trans, extent_root, ret);
5803                                 goto out;
5804                         }
5805                 }
5806                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5807                                  root_objectid);
5808         } else {
5809                 if (found_extent) {
5810                         BUG_ON(is_data && refs_to_drop !=
5811                                extent_data_ref_count(root, path, iref));
5812                         if (iref) {
5813                                 BUG_ON(path->slots[0] != extent_slot);
5814                         } else {
5815                                 BUG_ON(path->slots[0] != extent_slot + 1);
5816                                 path->slots[0] = extent_slot;
5817                                 num_to_del = 2;
5818                         }
5819                 }
5820
5821                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5822                                       num_to_del);
5823                 if (ret) {
5824                         btrfs_abort_transaction(trans, extent_root, ret);
5825                         goto out;
5826                 }
5827                 btrfs_release_path(path);
5828
5829                 if (is_data) {
5830                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5831                         if (ret) {
5832                                 btrfs_abort_transaction(trans, extent_root, ret);
5833                                 goto out;
5834                         }
5835                 }
5836
5837                 ret = update_block_group(root, bytenr, num_bytes, 0);
5838                 if (ret) {
5839                         btrfs_abort_transaction(trans, extent_root, ret);
5840                         goto out;
5841                 }
5842         }
5843 out:
5844         btrfs_free_path(path);
5845         return ret;
5846 }
5847
5848 /*
5849  * when we free a block, it is possible (and likely) that we free the last
5850  * delayed ref for that extent as well.  This searches the delayed ref tree for
5851  * a given extent, and if there are no other delayed refs to be processed, it
5852  * removes it from the tree.
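 *
 * Returns 1 if the head was removed and still had must_insert_reserved
 * set, meaning the extent never made it into the extent tree and the
 * caller can reclaim the space directly; returns 0 otherwise.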
5853  */
5854 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5855                                       struct btrfs_root *root, u64 bytenr)
5856 {
5857         struct btrfs_delayed_ref_head *head;
5858         struct btrfs_delayed_ref_root *delayed_refs;
5859         struct btrfs_delayed_ref_node *ref;
5860         struct rb_node *node;
5861         int ret = 0;
5862
5863         delayed_refs = &trans->transaction->delayed_refs;
5864         spin_lock(&delayed_refs->lock);
5865         head = btrfs_find_delayed_ref_head(trans, bytenr);
5866         if (!head)
5867                 goto out;
5868
5869         node = rb_prev(&head->node.rb_node);
5870         if (!node)
5871                 goto out;
5872
5873         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5874
5875         /* there are still entries for this ref, we can't drop it */
5876         if (ref->bytenr == bytenr)
5877                 goto out;
5878
5879         if (head->extent_op) {
5880                 if (!head->must_insert_reserved)
5881                         goto out;
5882                 btrfs_free_delayed_extent_op(head->extent_op);
5883                 head->extent_op = NULL;
5884         }
5885
5886         /*
5887          * waiting for the lock here would deadlock.  If someone else has it
5888          * locked, they are already in the process of dropping it anyway.
5889          */
5890         if (!mutex_trylock(&head->mutex))
5891                 goto out;
5892
5893         /*
5894          * at this point we have a head with no other entries.  Go
5895          * ahead and process it.
5896          */
5897         head->node.in_tree = 0;
5898         rb_erase(&head->node.rb_node, &delayed_refs->root);
5899
5900         delayed_refs->num_entries--;
5901
5902         /*
5903          * we don't take a ref on the node because we're removing it from the
5904          * tree, so we just steal the ref the tree was holding.
5905          */
5906         delayed_refs->num_heads--;
5907         if (list_empty(&head->cluster))
5908                 delayed_refs->num_heads_ready--;
5909
5910         list_del_init(&head->cluster);
5911         spin_unlock(&delayed_refs->lock);
5912
5913         BUG_ON(head->extent_op);
5914         if (head->must_insert_reserved)
5915                 ret = 1;
5916
5917         mutex_unlock(&head->mutex);
5918         btrfs_put_delayed_ref(&head->node);
5919         return ret;
5920 out:
5921         spin_unlock(&delayed_refs->lock);
5922         return 0;
5923 }
5924
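/*
 * Free one tree block.  A DROP_DELAYED_REF is queued for everything but
 * log tree blocks; if this was the last reference and the block was
 * allocated in the current transaction without ever being written, the
 * space goes straight back to the free space cache, otherwise the block
 * is pinned until the transaction commits.
 */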
5925 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5926                            struct btrfs_root *root,
5927                            struct extent_buffer *buf,
5928                            u64 parent, int last_ref)
5929 {
5930         struct btrfs_block_group_cache *cache = NULL;
5931         int pin = 1;
5932         int ret;
5933
5934         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5935                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5936                                         buf->start, buf->len,
5937                                         parent, root->root_key.objectid,
5938                                         btrfs_header_level(buf),
5939                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5940                 BUG_ON(ret); /* -ENOMEM */
5941         }
5942
5943         if (!last_ref)
5944                 return;
5945
5946         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5947
5948         if (btrfs_header_generation(buf) == trans->transid) {
5949                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5950                         ret = check_ref_cleanup(trans, root, buf->start);
5951                         if (!ret)
5952                                 goto out;
5953                 }
5954
5955                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5956                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5957                         goto out;
5958                 }
5959
5960                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5961
5962                 btrfs_add_free_space(cache, buf->start, buf->len);
5963                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5964                 pin = 0;
5965         }
5966 out:
5967         if (pin)
5968                 add_pinned_bytes(root->fs_info, buf->len,
5969                                  btrfs_header_level(buf),
5970                                  root->root_key.objectid);
5971
5972         /*
5973          * We are deleting the buffer, so clear the corrupt flag since it
5974          * doesn't matter anymore.
5975          */
5976         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5977         btrfs_put_block_group(cache);
5978 }
5979
5980 /* Can return -ENOMEM */
5981 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5982                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5983                       u64 owner, u64 offset, int for_cow)
5984 {
5985         int ret;
5986         struct btrfs_fs_info *fs_info = root->fs_info;
5987
5988         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
5989
5990         /*
5991          * tree log blocks never actually go into the extent allocation
5992          * tree, just update pinning info and exit early.
5993          */
5994         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5995                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5996                 /* unlocks the pinned mutex */
5997                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5998                 ret = 0;
5999         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6000                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6001                                         num_bytes,
6002                                         parent, root_objectid, (int)owner,
6003                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6004         } else {
6005                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6006                                                 num_bytes,
6007                                                 parent, root_objectid, owner,
6008                                                 offset, BTRFS_DROP_DELAYED_REF,
6009                                                 NULL, for_cow);
6010         }
6011         return ret;
6012 }
6013
6014 static u64 stripe_align(struct btrfs_root *root,
6015                         struct btrfs_block_group_cache *cache,
6016                         u64 val, u64 num_bytes)
6017 {
6018         u64 ret = ALIGN(val, root->stripesize);
6019         return ret;
6020 }
6021
6022 /*
6023  * when we wait for progress in the block group caching, it's because
6024  * our allocation attempt failed at least once.  So, we must sleep
6025  * and let some progress happen before we try again.
6026  *
6027  * This function will sleep at least once waiting for new free space to
6028  * show up, and then it will check the block group free space numbers
6029  * for our min num_bytes.  Another option is to have it go ahead
6030  * and look in the rbtree for a free extent of a given size, but this
6031  * is a good start.
6032  */
6033 static noinline int
6034 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6035                                 u64 num_bytes)
6036 {
6037         struct btrfs_caching_control *caching_ctl;
6038
6039         caching_ctl = get_caching_control(cache);
6040         if (!caching_ctl)
6041                 return 0;
6042
6043         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6044                    (cache->free_space_ctl->free_space >= num_bytes));
6045
6046         put_caching_control(caching_ctl);
6047         return 0;
6048 }
6049
6050 static noinline int
6051 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6052 {
6053         struct btrfs_caching_control *caching_ctl;
6054
6055         caching_ctl = get_caching_control(cache);
6056         if (!caching_ctl)
6057                 return 0;
6058
6059         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6060
6061         put_caching_control(caching_ctl);
6062         return 0;
6063 }
6064
6065 int __get_raid_index(u64 flags)
6066 {
6067         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6068                 return BTRFS_RAID_RAID10;
6069         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6070                 return BTRFS_RAID_RAID1;
6071         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6072                 return BTRFS_RAID_DUP;
6073         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6074                 return BTRFS_RAID_RAID0;
6075         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6076                 return BTRFS_RAID_RAID5;
6077         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6078                 return BTRFS_RAID_RAID6;
6079
6080         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6081 }
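
/*
 * For example, __get_raid_index(BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1) returns BTRFS_RAID_RAID1, while a plain
 * single profile with no raid bits set falls through to
 * BTRFS_RAID_SINGLE.
 */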
6082
6083 static int get_block_group_index(struct btrfs_block_group_cache *cache)
6084 {
6085         return __get_raid_index(cache->flags);
6086 }
6087
6088 enum btrfs_loop_type {
6089         LOOP_CACHING_NOWAIT = 0,
6090         LOOP_CACHING_WAIT = 1,
6091         LOOP_ALLOC_CHUNK = 2,
6092         LOOP_NO_EMPTY_SIZE = 3,
6093 };
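
/*
 * find_free_extent() escalates through these stages each time it
 * exhausts the candidate block groups: first scan without waiting on
 * caching, then wait for caching progress, then force a chunk
 * allocation, and finally retry with empty_size and empty_cluster
 * forced to 0 before giving up with -ENOSPC.
 */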
6094
6095 /*
6096  * walks the btree of allocated extents and finds a hole of a given size.
6097  * The key ins is changed to record the hole:
6098  * ins->objectid == block start
6099  * ins->flags == BTRFS_EXTENT_ITEM_KEY
6100  * ins->offset == number of blocks
6101  * Any available blocks before search_start are skipped.
6102  */
6103 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
6104                                      struct btrfs_root *orig_root,
6105                                      u64 num_bytes, u64 empty_size,
6106                                      u64 hint_byte, struct btrfs_key *ins,
6107                                      u64 flags)
6108 {
6109         int ret = 0;
6110         struct btrfs_root *root = orig_root->fs_info->extent_root;
6111         struct btrfs_free_cluster *last_ptr = NULL;
6112         struct btrfs_block_group_cache *block_group = NULL;
6113         struct btrfs_block_group_cache *used_block_group;
6114         u64 search_start = 0;
6115         int empty_cluster = 2 * 1024 * 1024;
6116         struct btrfs_space_info *space_info;
6117         int loop = 0;
6118         int index = __get_raid_index(flags);
6119         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6120                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6121         bool found_uncached_bg = false;
6122         bool failed_cluster_refill = false;
6123         bool failed_alloc = false;
6124         bool use_cluster = true;
6125         bool have_caching_bg = false;
6126
6127         WARN_ON(num_bytes < root->sectorsize);
6128         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6129         ins->objectid = 0;
6130         ins->offset = 0;
6131
6132         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6133
6134         space_info = __find_space_info(root->fs_info, flags);
6135         if (!space_info) {
6136                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6137                 return -ENOSPC;
6138         }
6139
6140         /*
6141          * If the space info is for both data and metadata it means we have a
6142          * small filesystem and we can't use the clustering stuff.
6143          */
6144         if (btrfs_mixed_space_info(space_info))
6145                 use_cluster = false;
6146
6147         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6148                 last_ptr = &root->fs_info->meta_alloc_cluster;
6149                 if (!btrfs_test_opt(root, SSD))
6150                         empty_cluster = 64 * 1024;
6151         }
6152
6153         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6154             btrfs_test_opt(root, SSD)) {
6155                 last_ptr = &root->fs_info->data_alloc_cluster;
6156         }
6157
6158         if (last_ptr) {
6159                 spin_lock(&last_ptr->lock);
6160                 if (last_ptr->block_group)
6161                         hint_byte = last_ptr->window_start;
6162                 spin_unlock(&last_ptr->lock);
6163         }
6164
6165         search_start = max(search_start, first_logical_byte(root, 0));
6166         search_start = max(search_start, hint_byte);
6167
6168         if (!last_ptr)
6169                 empty_cluster = 0;
6170
6171         if (search_start == hint_byte) {
6172                 block_group = btrfs_lookup_block_group(root->fs_info,
6173                                                        search_start);
6174                 used_block_group = block_group;
6175                 /*
6176                  * we don't want to use the block group if it doesn't match our
6177                  * allocation bits, or if it's not cached.
6178                  *
6179                  * However, if we are re-searching with an ideal block group
6180                  * picked out then we don't care that the block group is cached.
6181                  */
6182                 if (block_group && block_group_bits(block_group, flags) &&
6183                     block_group->cached != BTRFS_CACHE_NO) {
6184                         down_read(&space_info->groups_sem);
6185                         if (list_empty(&block_group->list) ||
6186                             block_group->ro) {
6187                                 /*
6188                                  * someone is removing this block group,
6189                                  * we can't jump into the have_block_group
6190                                  * target because our list pointers are not
6191                                  * valid
6192                                  */
6193                                 btrfs_put_block_group(block_group);
6194                                 up_read(&space_info->groups_sem);
6195                         } else {
6196                                 index = get_block_group_index(block_group);
6197                                 goto have_block_group;
6198                         }
6199                 } else if (block_group) {
6200                         btrfs_put_block_group(block_group);
6201                 }
6202         }
6203 search:
6204         have_caching_bg = false;
6205         down_read(&space_info->groups_sem);
6206         list_for_each_entry(block_group, &space_info->block_groups[index],
6207                             list) {
6208                 u64 offset;
6209                 int cached;
6210
6211                 used_block_group = block_group;
6212                 btrfs_get_block_group(block_group);
6213                 search_start = block_group->key.objectid;
6214
6215                 /*
6216                  * this can happen if we end up cycling through all the
6217                  * raid types, but we want to make sure we only allocate
6218                  * for the proper type.
6219                  */
6220                 if (!block_group_bits(block_group, flags)) {
6221                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6222                                     BTRFS_BLOCK_GROUP_RAID1 |
6223                                     BTRFS_BLOCK_GROUP_RAID5 |
6224                                     BTRFS_BLOCK_GROUP_RAID6 |
6225                                     BTRFS_BLOCK_GROUP_RAID10;
6226
6227                         /*
6228                          * if they asked for extra copies and this block group
6229                          * doesn't provide them, bail.  This does allow us to
6230                          * fill raid0 from raid1.
6231                          */
6232                         if ((flags & extra) && !(block_group->flags & extra))
6233                                 goto loop;
6234                 }
6235
6236 have_block_group:
6237                 cached = block_group_cache_done(block_group);
6238                 if (unlikely(!cached)) {
6239                         found_uncached_bg = true;
6240                         ret = cache_block_group(block_group, 0);
6241                         BUG_ON(ret < 0);
6242                         ret = 0;
6243                 }
6244
6245                 if (unlikely(block_group->ro))
6246                         goto loop;
6247
6248                 /*
6249                  * Ok, we want to try to use the cluster allocator, so
6250                  * let's look there
6251                  */
6252                 if (last_ptr) {
6253                         unsigned long aligned_cluster;
6254                         /*
6255                          * the refill lock keeps out other
6256                          * people trying to start a new cluster
6257                          */
6258                         spin_lock(&last_ptr->refill_lock);
6259                         used_block_group = last_ptr->block_group;
6260                         if (used_block_group != block_group &&
6261                             (!used_block_group ||
6262                              used_block_group->ro ||
6263                              !block_group_bits(used_block_group, flags))) {
6264                                 used_block_group = block_group;
6265                                 goto refill_cluster;
6266                         }
6267
6268                         if (used_block_group != block_group)
6269                                 btrfs_get_block_group(used_block_group);
6270
6271                         offset = btrfs_alloc_from_cluster(used_block_group,
6272                           last_ptr, num_bytes, used_block_group->key.objectid);
6273                         if (offset) {
6274                                 /* we have a block, we're done */
6275                                 spin_unlock(&last_ptr->refill_lock);
6276                                 trace_btrfs_reserve_extent_cluster(root,
6277                                         block_group, search_start, num_bytes);
6278                                 goto checks;
6279                         }
6280
6281                         WARN_ON(last_ptr->block_group != used_block_group);
6282                         if (used_block_group != block_group) {
6283                                 btrfs_put_block_group(used_block_group);
6284                                 used_block_group = block_group;
6285                         }
6286 refill_cluster:
6287                         BUG_ON(used_block_group != block_group);
6288                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6289                          * set up a new cluster, so let's just skip it
6290                          * and let the allocator find whatever block
6291                          * it can find.  If we reach this point, we
6292                          * will have tried the cluster allocator
6293                          * plenty of times and not have found
6294                          * anything, so we are likely way too
6295                          * fragmented for the clustering stuff to find
6296                          * anything.
6297                          *
6298                          * However, if the cluster is taken from the
6299                          * current block group, release the cluster
6300                          * first, so that we stand a better chance of
6301                          * succeeding in the unclustered
6302                          * allocation.  */
6303                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6304                             last_ptr->block_group != block_group) {
6305                                 spin_unlock(&last_ptr->refill_lock);
6306                                 goto unclustered_alloc;
6307                         }
6308
6309                         /*
6310                          * this cluster didn't work out, free it and
6311                          * start over
6312                          */
6313                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6314
6315                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6316                                 spin_unlock(&last_ptr->refill_lock);
6317                                 goto unclustered_alloc;
6318                         }
6319
6320                         aligned_cluster = max_t(unsigned long,
6321                                                 empty_cluster + empty_size,
6322                                               block_group->full_stripe_len);
6323
6324                         /* allocate a cluster in this block group */
6325                         ret = btrfs_find_space_cluster(trans, root,
6326                                                block_group, last_ptr,
6327                                                search_start, num_bytes,
6328                                                aligned_cluster);
6329                         if (ret == 0) {
6330                                 /*
6331                                  * now pull our allocation out of this
6332                                  * cluster
6333                                  */
6334                                 offset = btrfs_alloc_from_cluster(block_group,
6335                                                   last_ptr, num_bytes,
6336                                                   search_start);
6337                                 if (offset) {
6338                                         /* we found one, proceed */
6339                                         spin_unlock(&last_ptr->refill_lock);
6340                                         trace_btrfs_reserve_extent_cluster(root,
6341                                                 block_group, search_start,
6342                                                 num_bytes);
6343                                         goto checks;
6344                                 }
6345                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6346                                    && !failed_cluster_refill) {
6347                                 spin_unlock(&last_ptr->refill_lock);
6348
6349                                 failed_cluster_refill = true;
6350                                 wait_block_group_cache_progress(block_group,
6351                                        num_bytes + empty_cluster + empty_size);
6352                                 goto have_block_group;
6353                         }
6354
6355                         /*
6356                          * at this point we either didn't find a cluster
6357                          * or we weren't able to allocate a block from our
6358                          * cluster.  Free the cluster we've been trying
6359                          * to use, and go to the next block group
6360                          */
6361                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6362                         spin_unlock(&last_ptr->refill_lock);
6363                         goto loop;
6364                 }
6365
6366 unclustered_alloc:
6367                 spin_lock(&block_group->free_space_ctl->tree_lock);
6368                 if (cached &&
6369                     block_group->free_space_ctl->free_space <
6370                     num_bytes + empty_cluster + empty_size) {
6371                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6372                         goto loop;
6373                 }
6374                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6375
6376                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6377                                                     num_bytes, empty_size);
6378                 /*
6379                  * If we didn't find a chunk, and we haven't failed on this
6380                  * block group before, and this block group is in the middle of
6381                  * caching and we are ok with waiting, then go ahead and wait
6382                  * for progress to be made, and set failed_alloc to true.
6383                  *
6384                  * If failed_alloc is true then we've already waited on this
6385                  * block group once and should move on to the next block group.
6386                  */
6387                 if (!offset && !failed_alloc && !cached &&
6388                     loop > LOOP_CACHING_NOWAIT) {
6389                         wait_block_group_cache_progress(block_group,
6390                                                 num_bytes + empty_size);
6391                         failed_alloc = true;
6392                         goto have_block_group;
6393                 } else if (!offset) {
6394                         if (!cached)
6395                                 have_caching_bg = true;
6396                         goto loop;
6397                 }
6398 checks:
6399                 search_start = stripe_align(root, used_block_group,
6400                                             offset, num_bytes);
6401
6402                 /* move on to the next group */
6403                 if (search_start + num_bytes >
6404                     used_block_group->key.objectid + used_block_group->key.offset) {
6405                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6406                         goto loop;
6407                 }
6408
6409                 if (offset < search_start)
6410                         btrfs_add_free_space(used_block_group, offset,
6411                                              search_start - offset);
6412                 BUG_ON(offset > search_start);
6413
6414                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6415                                                   alloc_type);
6416                 if (ret == -EAGAIN) {
6417                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6418                         goto loop;
6419                 }
6420
6421                 /* we are all good, lets return */
6422                 ins->objectid = search_start;
6423                 ins->offset = num_bytes;
6424
6425                 trace_btrfs_reserve_extent(orig_root, block_group,
6426                                            search_start, num_bytes);
6427                 if (used_block_group != block_group)
6428                         btrfs_put_block_group(used_block_group);
6429                 btrfs_put_block_group(block_group);
6430                 break;
6431 loop:
6432                 failed_cluster_refill = false;
6433                 failed_alloc = false;
6434                 BUG_ON(index != get_block_group_index(block_group));
6435                 if (used_block_group != block_group)
6436                         btrfs_put_block_group(used_block_group);
6437                 btrfs_put_block_group(block_group);
6438         }
6439         up_read(&space_info->groups_sem);
6440
6441         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6442                 goto search;
6443
6444         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6445                 goto search;
6446
6447         /*
6448          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6449          *                      caching kthreads as we move along
6450          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6451          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6452          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6453          *                      again
6454          */
6455         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6456                 index = 0;
6457                 loop++;
6458                 if (loop == LOOP_ALLOC_CHUNK) {
6459                         ret = do_chunk_alloc(trans, root, flags,
6460                                              CHUNK_ALLOC_FORCE);
6461                         /*
6462                          * Do not bail out on ENOSPC since we
6463                          * may still make progress on a later loop.
6464                          */
6465                         if (ret < 0 && ret != -ENOSPC) {
6466                                 btrfs_abort_transaction(trans,
6467                                                         root, ret);
6468                                 goto out;
6469                         }
6470                 }
6471
6472                 if (loop == LOOP_NO_EMPTY_SIZE) {
6473                         empty_size = 0;
6474                         empty_cluster = 0;
6475                 }
6476
6477                 goto search;
6478         } else if (!ins->objectid) {
6479                 ret = -ENOSPC;
6480         } else {
6481                 ret = 0;
6482         }
6483 out:
6484
6485         return ret;
6486 }
6487
6488 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6489                             int dump_block_groups)
6490 {
6491         struct btrfs_block_group_cache *cache;
6492         int index = 0;
6493
6494         spin_lock(&info->lock);
6495         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6496                (unsigned long long)info->flags,
6497                (unsigned long long)(info->total_bytes - info->bytes_used -
6498                                     info->bytes_pinned - info->bytes_reserved -
6499                                     info->bytes_readonly),
6500                (info->full) ? "" : "not ");
6501         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6502                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6503                (unsigned long long)info->total_bytes,
6504                (unsigned long long)info->bytes_used,
6505                (unsigned long long)info->bytes_pinned,
6506                (unsigned long long)info->bytes_reserved,
6507                (unsigned long long)info->bytes_may_use,
6508                (unsigned long long)info->bytes_readonly);
6509         spin_unlock(&info->lock);
6510
6511         if (!dump_block_groups)
6512                 return;
6513
6514         down_read(&info->groups_sem);
6515 again:
6516         list_for_each_entry(cache, &info->block_groups[index], list) {
6517                 spin_lock(&cache->lock);
6518                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6519                        (unsigned long long)cache->key.objectid,
6520                        (unsigned long long)cache->key.offset,
6521                        (unsigned long long)btrfs_block_group_used(&cache->item),
6522                        (unsigned long long)cache->pinned,
6523                        (unsigned long long)cache->reserved,
6524                        cache->ro ? "[readonly]" : "");
6525                 btrfs_dump_free_space(cache, bytes);
6526                 spin_unlock(&cache->lock);
6527         }
6528         if (++index < BTRFS_NR_RAID_TYPES)
6529                 goto again;
6530         up_read(&info->groups_sem);
6531 }
6532
6533 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6534                          struct btrfs_root *root,
6535                          u64 num_bytes, u64 min_alloc_size,
6536                          u64 empty_size, u64 hint_byte,
6537                          struct btrfs_key *ins, int is_data)
6538 {
6539         bool final_tried = false;
6540         u64 flags;
6541         int ret;
6542
6543         flags = btrfs_get_alloc_profile(root, is_data);
6544 again:
6545         WARN_ON(num_bytes < root->sectorsize);
6546         ret = find_free_extent(trans, root, num_bytes, empty_size,
6547                                hint_byte, ins, flags);
6548
6549         if (ret == -ENOSPC) {
6550                 if (!final_tried) {
6551                         num_bytes = num_bytes >> 1;
6552                         num_bytes = round_down(num_bytes, root->sectorsize);
6553                         num_bytes = max(num_bytes, min_alloc_size);
6554                         if (num_bytes == min_alloc_size)
6555                                 final_tried = true;
6556                         goto again;
6557                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6558                         struct btrfs_space_info *sinfo;
6559
6560                         sinfo = __find_space_info(root->fs_info, flags);
6561                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6562                                 (unsigned long long)flags,
6563                                 (unsigned long long)num_bytes);
6564                         if (sinfo)
6565                                 dump_space_info(sinfo, num_bytes, 1);
6566                 }
6567         }
6568
6569         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6570
6571         return ret;
6572 }
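
/*
 * A worked example of the retry above, assuming 4K sectors and a
 * min_alloc_size of 16K: a failed 1M request is retried at 512K, 256K,
 * 128K, 64K, 32K and then 16K; once num_bytes reaches min_alloc_size
 * the attempt is marked final, and a further failure returns -ENOSPC
 * (dumping space info if ENOSPC_DEBUG is set).
 */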
6573
6574 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6575                                         u64 start, u64 len, int pin)
6576 {
6577         struct btrfs_block_group_cache *cache;
6578         int ret = 0;
6579
6580         cache = btrfs_lookup_block_group(root->fs_info, start);
6581         if (!cache) {
6582                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6583                         (unsigned long long)start);
6584                 return -ENOSPC;
6585         }
6586
6587         if (btrfs_test_opt(root, DISCARD))
6588                 ret = btrfs_discard_extent(root, start, len, NULL);
6589
6590         if (pin)
6591                 pin_down_extent(root, cache, start, len, 1);
6592         else {
6593                 btrfs_add_free_space(cache, start, len);
6594                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6595         }
6596         btrfs_put_block_group(cache);
6597
6598         trace_btrfs_reserved_extent_free(root, start, len);
6599
6600         return ret;
6601 }
6602
6603 int btrfs_free_reserved_extent(struct btrfs_root *root,
6604                                         u64 start, u64 len)
6605 {
6606         return __btrfs_free_reserved_extent(root, start, len, 0);
6607 }
6608
6609 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6610                                        u64 start, u64 len)
6611 {
6612         return __btrfs_free_reserved_extent(root, start, len, 1);
6613 }
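
/*
 * Both wrappers above hand the range back to the space accounting; the
 * _and_pin variant keeps it pinned until the transaction commits
 * instead of making it immediately re-allocatable.
 */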
6614
6615 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6616                                       struct btrfs_root *root,
6617                                       u64 parent, u64 root_objectid,
6618                                       u64 flags, u64 owner, u64 offset,
6619                                       struct btrfs_key *ins, int ref_mod)
6620 {
6621         int ret;
6622         struct btrfs_fs_info *fs_info = root->fs_info;
6623         struct btrfs_extent_item *extent_item;
6624         struct btrfs_extent_inline_ref *iref;
6625         struct btrfs_path *path;
6626         struct extent_buffer *leaf;
6627         int type;
6628         u32 size;
6629
6630         if (parent > 0)
6631                 type = BTRFS_SHARED_DATA_REF_KEY;
6632         else
6633                 type = BTRFS_EXTENT_DATA_REF_KEY;
6634
6635         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6636
6637         path = btrfs_alloc_path();
6638         if (!path)
6639                 return -ENOMEM;
6640
6641         path->leave_spinning = 1;
6642         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6643                                       ins, size);
6644         if (ret) {
6645                 btrfs_free_path(path);
6646                 return ret;
6647         }
6648
6649         leaf = path->nodes[0];
6650         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6651                                      struct btrfs_extent_item);
6652         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6653         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6654         btrfs_set_extent_flags(leaf, extent_item,
6655                                flags | BTRFS_EXTENT_FLAG_DATA);
6656
6657         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6658         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6659         if (parent > 0) {
6660                 struct btrfs_shared_data_ref *ref;
6661                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6662                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6663                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6664         } else {
6665                 struct btrfs_extent_data_ref *ref;
6666                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6667                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6668                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6669                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6670                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6671         }
6672
6673         btrfs_mark_buffer_dirty(path->nodes[0]);
6674         btrfs_free_path(path);
6675
6676         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6677         if (ret) { /* -ENOENT, logic error */
6678                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6679                         (unsigned long long)ins->objectid,
6680                         (unsigned long long)ins->offset);
6681                 BUG();
6682         }
6683         return ret;
6684 }
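/*
 * [Editor's sketch] alloc_reserved_file_extent() above packs the extent
 * item and its first inline backref into one contiguous leaf item, which
 * is why the ref is reached with "(iref + 1)"-style pointer arithmetic
 * off the end of the extent item.  A minimal, self-contained userspace
 * illustration of that layout (demo_* names and structs are hypothetical
 * stand-ins, not btrfs's on-disk definitions):
 */
#include <stdio.h>
#include <stddef.h>

struct demo_extent_item {
        unsigned long long refs;
        unsigned long long generation;
        unsigned long long flags;
};

struct demo_inline_ref {
        unsigned char type;
        unsigned long long offset;
} __attribute__((packed));

static void demo_inline_ref_layout(void)
{
        /* one buffer sized for both pieces, like the leaf item above */
        unsigned char item[sizeof(struct demo_extent_item) +
                           sizeof(struct demo_inline_ref)];
        struct demo_extent_item *ei = (struct demo_extent_item *)item;
        struct demo_inline_ref *iref = (struct demo_inline_ref *)(ei + 1);

        printf("item is %zu bytes, inline ref starts at byte %zu\n",
               sizeof(item), (size_t)((unsigned char *)iref - item));
}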
6685
6686 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6687                                      struct btrfs_root *root,
6688                                      u64 parent, u64 root_objectid,
6689                                      u64 flags, struct btrfs_disk_key *key,
6690                                      int level, struct btrfs_key *ins)
6691 {
6692         int ret;
6693         struct btrfs_fs_info *fs_info = root->fs_info;
6694         struct btrfs_extent_item *extent_item;
6695         struct btrfs_tree_block_info *block_info;
6696         struct btrfs_extent_inline_ref *iref;
6697         struct btrfs_path *path;
6698         struct extent_buffer *leaf;
6699         u32 size = sizeof(*extent_item) + sizeof(*iref);
6700         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6701                                                  SKINNY_METADATA);
6702
6703         if (!skinny_metadata)
6704                 size += sizeof(*block_info);
6705
6706         path = btrfs_alloc_path();
6707         if (!path)
6708                 return -ENOMEM;
6709
6710         path->leave_spinning = 1;
6711         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6712                                       ins, size);
6713         if (ret) {
6714                 btrfs_free_path(path);
6715                 return ret;
6716         }
6717
6718         leaf = path->nodes[0];
6719         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6720                                      struct btrfs_extent_item);
6721         btrfs_set_extent_refs(leaf, extent_item, 1);
6722         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6723         btrfs_set_extent_flags(leaf, extent_item,
6724                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6725
6726         if (skinny_metadata) {
6727                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6728         } else {
6729                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6730                 btrfs_set_tree_block_key(leaf, block_info, key);
6731                 btrfs_set_tree_block_level(leaf, block_info, level);
6732                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6733         }
6734
6735         if (parent > 0) {
6736                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6737                 btrfs_set_extent_inline_ref_type(leaf, iref,
6738                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6739                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6740         } else {
6741                 btrfs_set_extent_inline_ref_type(leaf, iref,
6742                                                  BTRFS_TREE_BLOCK_REF_KEY);
6743                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6744         }
6745
6746         btrfs_mark_buffer_dirty(leaf);
6747         btrfs_free_path(path);
6748
6749         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6750         if (ret) { /* -ENOENT, logic error */
6751                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6752                         (unsigned long long)ins->objectid,
6753                         (unsigned long long)ins->offset);
6754                 BUG();
6755         }
6756         return ret;
6757 }
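/*
 * [Editor's sketch] the only layout difference SKINNY_METADATA makes in
 * alloc_reserved_tree_block() above is whether a tree_block_info sits
 * between the extent item and the inline ref, which is why the item size
 * is finalized before btrfs_insert_empty_item().  A toy helper with the
 * byte counts spelled out (illustrative, not the real sizeof math):
 */
static unsigned int demo_tree_block_item_size(int skinny_metadata)
{
        const unsigned int extent_item = 24;    /* refs + generation + flags */
        const unsigned int inline_ref = 9;      /* type byte + 64-bit offset */
        const unsigned int block_info = 18;     /* disk key (17) + level byte */
        unsigned int size = extent_item + inline_ref;

        if (!skinny_metadata)
                size += block_info;
        return size;
}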
6758
6759 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6760                                      struct btrfs_root *root,
6761                                      u64 root_objectid, u64 owner,
6762                                      u64 offset, struct btrfs_key *ins)
6763 {
6764         int ret;
6765
6766         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6767
6768         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6769                                          ins->offset, 0,
6770                                          root_objectid, owner, offset,
6771                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6772         return ret;
6773 }
6774
6775 /*
6776  * this is used by the tree logging recovery code.  It records that
6777  * an extent has been allocated and makes sure to clear the free
6778  * space cache bits as well
6779  */
6780 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6781                                    struct btrfs_root *root,
6782                                    u64 root_objectid, u64 owner, u64 offset,
6783                                    struct btrfs_key *ins)
6784 {
6785         int ret;
6786         struct btrfs_block_group_cache *block_group;
6787
6788         /*
6789          * Mixed block groups have their extents excluded before the log
6790          * is processed, so we only do the exclude dance if this fs isn't mixed.
6791          */
6792         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6793                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6794                 if (ret)
6795                         return ret;
6796         }
6797
6798         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6799         if (!block_group)
6800                 return -EINVAL;
6801
6802         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6803                                           RESERVE_ALLOC_NO_ACCOUNT);
6804         BUG_ON(ret); /* logic error */
6805         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6806                                          0, owner, offset, ins, 1);
6807         btrfs_put_block_group(block_group);
6808         return ret;
6809 }
6810
6811 static struct extent_buffer *
6812 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6813                       u64 bytenr, u32 blocksize, int level)
6814 {
6815         struct extent_buffer *buf;
6816
6817         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6818         if (!buf)
6819                 return ERR_PTR(-ENOMEM);
6820         btrfs_set_header_generation(buf, trans->transid);
6821         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6822         btrfs_tree_lock(buf);
6823         clean_tree_block(trans, root, buf);
6824         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6825
6826         btrfs_set_lock_blocking(buf);
6827         btrfs_set_buffer_uptodate(buf);
6828
6829         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6830                 /*
6831                  * we allow two log transactions at a time; use different
6832                  * EXTENT bits to differentiate their dirty pages.
6833                  */
6834                 if (root->log_transid % 2 == 0)
6835                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6836                                         buf->start + buf->len - 1, GFP_NOFS);
6837                 else
6838                         set_extent_new(&root->dirty_log_pages, buf->start,
6839                                         buf->start + buf->len - 1, GFP_NOFS);
6840         } else {
6841                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6842                          buf->start + buf->len - 1, GFP_NOFS);
6843         }
6844         trans->blocks_used++;
6845         /* this returns a buffer locked for blocking */
6846         return buf;
6847 }
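/*
 * [Editor's sketch] two log transactions may be in flight at once, so
 * btrfs_init_new_buffer() above tags log tree buffers with one of two
 * extent bits chosen by log_transid parity.  The selection logic reduced
 * to a toy helper (names are hypothetical):
 */
enum demo_log_bit { DEMO_EXTENT_DIRTY, DEMO_EXTENT_NEW };

static enum demo_log_bit demo_pick_log_bit(unsigned long long log_transid)
{
        /* even transids mark pages DIRTY, odd transids mark them NEW */
        return (log_transid % 2 == 0) ? DEMO_EXTENT_DIRTY : DEMO_EXTENT_NEW;
}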
6848
6849 static struct btrfs_block_rsv *
6850 use_block_rsv(struct btrfs_trans_handle *trans,
6851               struct btrfs_root *root, u32 blocksize)
6852 {
6853         struct btrfs_block_rsv *block_rsv;
6854         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6855         int ret;
6856         bool global_updated = false;
6857
6858         block_rsv = get_block_rsv(trans, root);
6859
6860         if (unlikely(block_rsv->size == 0))
6861                 goto try_reserve;
6862 again:
6863         ret = block_rsv_use_bytes(block_rsv, blocksize);
6864         if (!ret)
6865                 return block_rsv;
6866
6867         if (block_rsv->failfast)
6868                 return ERR_PTR(ret);
6869
6870         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6871                 global_updated = true;
6872                 update_global_block_rsv(root->fs_info);
6873                 goto again;
6874         }
6875
6876         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6877                 static DEFINE_RATELIMIT_STATE(_rs,
6878                                 DEFAULT_RATELIMIT_INTERVAL * 10,
6879                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
6880                 if (__ratelimit(&_rs))
6881                         WARN(1, KERN_DEBUG
6882                                 "btrfs: block rsv returned %d\n", ret);
6883         }
6884 try_reserve:
6885         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6886                                      BTRFS_RESERVE_NO_FLUSH);
6887         if (!ret)
6888                 return block_rsv;
6889         /*
6890          * If we couldn't reserve metadata bytes, try to take some from
6891          * the global reserve, but only if it accounts against the same
6892          * space_info as this reservation.
6893          */
6894         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6895             block_rsv->space_info == global_rsv->space_info) {
6896                 ret = block_rsv_use_bytes(global_rsv, blocksize);
6897                 if (!ret)
6898                         return global_rsv;
6899         }
6900         return ERR_PTR(ret);
6901 }
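/*
 * [Editor's sketch] the fallback order use_block_rsv() above implements:
 * take from the root's own rsv; if that rsv is the global one, refresh
 * it once and retry; reserve fresh metadata with NO_FLUSH; and finally
 * steal from the global rsv when both track the same space_info.  A
 * compressed toy model (demo_* types and helpers are hypothetical):
 */
struct demo_rsv {
        unsigned long long reserved;
        int failfast;
};

/* carve @bytes out of @rsv; returns 0 on success, -ENOSPC otherwise */
static int demo_rsv_use(struct demo_rsv *rsv, unsigned long long bytes)
{
        if (rsv->reserved < bytes)
                return -28;     /* stand-in for -ENOSPC */
        rsv->reserved -= bytes;
        return 0;
}

static int demo_use_block_rsv(struct demo_rsv *rsv, struct demo_rsv *global,
                              unsigned long long bytes)
{
        if (demo_rsv_use(rsv, bytes) == 0)
                return 0;
        if (rsv->failfast)
                return -28;
        /* the real code refreshes the global rsv and retries, then tries
         * a fresh NO_FLUSH reservation, before falling back here */
        return demo_rsv_use(global, bytes);
}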
6902
6903 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6904                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6905 {
6906         block_rsv_add_bytes(block_rsv, blocksize, 0);
6907         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6908 }
6909
6910 /*
6911  * finds a free extent and does all the dirty work required for allocation:
6912  * it reserves the extent, initializes a locked tree buffer for the first
6913  * block of the extent and queues the delayed ref that records the backref.
6914  *
6915  * returns the locked tree buffer or an ERR_PTR on failure.
6916  */
6917 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6918                                         struct btrfs_root *root, u32 blocksize,
6919                                         u64 parent, u64 root_objectid,
6920                                         struct btrfs_disk_key *key, int level,
6921                                         u64 hint, u64 empty_size)
6922 {
6923         struct btrfs_key ins;
6924         struct btrfs_block_rsv *block_rsv;
6925         struct extent_buffer *buf;
6926         u64 flags = 0;
6927         int ret;
6928         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6929                                                  SKINNY_METADATA);
6930
6931         block_rsv = use_block_rsv(trans, root, blocksize);
6932         if (IS_ERR(block_rsv))
6933                 return ERR_CAST(block_rsv);
6934
6935         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6936                                    empty_size, hint, &ins, 0);
6937         if (ret) {
6938                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6939                 return ERR_PTR(ret);
6940         }
6941
6942         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6943                                     blocksize, level);
6944         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6945
6946         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6947                 if (parent == 0)
6948                         parent = ins.objectid;
6949                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6950         } else
6951                 BUG_ON(parent > 0);
6952
6953         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6954                 struct btrfs_delayed_extent_op *extent_op;
6955                 extent_op = btrfs_alloc_delayed_extent_op();
6956                 BUG_ON(!extent_op); /* -ENOMEM */
6957                 if (key)
6958                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6959                 else
6960                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6961                 extent_op->flags_to_set = flags;
6962                 if (skinny_metadata)
6963                         extent_op->update_key = 0;
6964                 else
6965                         extent_op->update_key = 1;
6966                 extent_op->update_flags = 1;
6967                 extent_op->is_data = 0;
6968                 extent_op->level = level;
6969
6970                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6971                                         ins.objectid,
6972                                         ins.offset, parent, root_objectid,
6973                                         level, BTRFS_ADD_DELAYED_EXTENT,
6974                                         extent_op, 0);
6975                 BUG_ON(ret); /* -ENOMEM */
6976         }
6977         return buf;
6978 }
6979
6980 struct walk_control {
6981         u64 refs[BTRFS_MAX_LEVEL];
6982         u64 flags[BTRFS_MAX_LEVEL];
6983         struct btrfs_key update_progress;
6984         int stage;
6985         int level;
6986         int shared_level;
6987         int update_ref;
6988         int keep_locks;
6989         int reada_slot;
6990         int reada_count;
6991         int for_reloc;
6992 };
6993
6994 #define DROP_REFERENCE  1
6995 #define UPDATE_BACKREF  2
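/*
 * [Editor's note] the drop walk below is a two-stage state machine:
 * DROP_REFERENCE frees blocks owned exclusively by the tree being
 * dropped.  When it meets a shared subtree whose backrefs must be
 * rewritten first, do_walk_down() flips the stage to UPDATE_BACKREF,
 * walk_down_proc() rewrites the refs for each block in that subtree,
 * and walk_up_proc() flips back to DROP_REFERENCE once the walk
 * returns to the shared level.
 */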
6996
6997 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6998                                      struct btrfs_root *root,
6999                                      struct walk_control *wc,
7000                                      struct btrfs_path *path)
7001 {
7002         u64 bytenr;
7003         u64 generation;
7004         u64 refs;
7005         u64 flags;
7006         u32 nritems;
7007         u32 blocksize;
7008         struct btrfs_key key;
7009         struct extent_buffer *eb;
7010         int ret;
7011         int slot;
7012         int nread = 0;
7013
7014         if (path->slots[wc->level] < wc->reada_slot) {
7015                 wc->reada_count = wc->reada_count * 2 / 3;
7016                 wc->reada_count = max(wc->reada_count, 2);
7017         } else {
7018                 wc->reada_count = wc->reada_count * 3 / 2;
7019                 wc->reada_count = min_t(int, wc->reada_count,
7020                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7021         }
7022
7023         eb = path->nodes[wc->level];
7024         nritems = btrfs_header_nritems(eb);
7025         blocksize = btrfs_level_size(root, wc->level - 1);
7026
7027         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7028                 if (nread >= wc->reada_count)
7029                         break;
7030
7031                 cond_resched();
7032                 bytenr = btrfs_node_blockptr(eb, slot);
7033                 generation = btrfs_node_ptr_generation(eb, slot);
7034
7035                 if (slot == path->slots[wc->level])
7036                         goto reada;
7037
7038                 if (wc->stage == UPDATE_BACKREF &&
7039                     generation <= root->root_key.offset)
7040                         continue;
7041
7042                 /* We don't lock the tree block, it's OK to be racy here */
7043                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7044                                                wc->level - 1, 1, &refs,
7045                                                &flags);
7046                 /* We don't care about errors in readahead. */
7047                 if (ret < 0)
7048                         continue;
7049                 BUG_ON(refs == 0);
7050
7051                 if (wc->stage == DROP_REFERENCE) {
7052                         if (refs == 1)
7053                                 goto reada;
7054
7055                         if (wc->level == 1 &&
7056                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7057                                 continue;
7058                         if (!wc->update_ref ||
7059                             generation <= root->root_key.offset)
7060                                 continue;
7061                         btrfs_node_key_to_cpu(eb, &key, slot);
7062                         ret = btrfs_comp_cpu_keys(&key,
7063                                                   &wc->update_progress);
7064                         if (ret < 0)
7065                                 continue;
7066                 } else {
7067                         if (wc->level == 1 &&
7068                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7069                                 continue;
7070                 }
7071 reada:
7072                 ret = readahead_tree_block(root, bytenr, blocksize,
7073                                            generation);
7074                 if (ret)
7075                         break;
7076                 nread++;
7077         }
7078         wc->reada_slot = slot;
7079 }
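/*
 * [Editor's sketch] the window adaptation at the top of reada_walk_down()
 * above: shrink the readahead count by a third while the walk is still
 * behind the last readahead point, grow it by half (capped at the node
 * fanout) once it has moved past it.  As a free-standing helper (names
 * are hypothetical):
 */
static int demo_adapt_reada_count(int count, int still_behind, int cap)
{
        if (still_behind) {
                count = count * 2 / 3;
                if (count < 2)
                        count = 2;
        } else {
                count = count * 3 / 2;
                if (count > cap)
                        count = cap;
        }
        return count;
}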
7080
7081 /*
7082  * helper to process tree block while walking down the tree.
7083  *
7084  * when wc->stage == UPDATE_BACKREF, this function updates
7085  * back refs for pointers in the block.
7086  *
7087  * NOTE: return value 1 means we should stop walking down.
7088  */
7089 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7090                                    struct btrfs_root *root,
7091                                    struct btrfs_path *path,
7092                                    struct walk_control *wc, int lookup_info)
7093 {
7094         int level = wc->level;
7095         struct extent_buffer *eb = path->nodes[level];
7096         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7097         int ret;
7098
7099         if (wc->stage == UPDATE_BACKREF &&
7100             btrfs_header_owner(eb) != root->root_key.objectid)
7101                 return 1;
7102
7103         /*
7104          * when the reference count of a tree block is 1, it won't increase
7105          * again. once the full backref flag is set, we never clear it.
7106          */
7107         if (lookup_info &&
7108             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7109              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7110                 BUG_ON(!path->locks[level]);
7111                 ret = btrfs_lookup_extent_info(trans, root,
7112                                                eb->start, level, 1,
7113                                                &wc->refs[level],
7114                                                &wc->flags[level]);
7115                 BUG_ON(ret == -ENOMEM);
7116                 if (ret)
7117                         return ret;
7118                 BUG_ON(wc->refs[level] == 0);
7119         }
7120
7121         if (wc->stage == DROP_REFERENCE) {
7122                 if (wc->refs[level] > 1)
7123                         return 1;
7124
7125                 if (path->locks[level] && !wc->keep_locks) {
7126                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7127                         path->locks[level] = 0;
7128                 }
7129                 return 0;
7130         }
7131
7132         /* wc->stage == UPDATE_BACKREF */
7133         if (!(wc->flags[level] & flag)) {
7134                 BUG_ON(!path->locks[level]);
7135                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7136                 BUG_ON(ret); /* -ENOMEM */
7137                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7138                 BUG_ON(ret); /* -ENOMEM */
7139                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7140                                                   eb->len, flag,
7141                                                   btrfs_header_level(eb), 0);
7142                 BUG_ON(ret); /* -ENOMEM */
7143                 wc->flags[level] |= flag;
7144         }
7145
7146         /*
7147          * the block is shared by multiple trees, so it's not good to
7148          * keep the tree lock
7149          */
7150         if (path->locks[level] && level > 0) {
7151                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7152                 path->locks[level] = 0;
7153         }
7154         return 0;
7155 }
7156
7157 /*
7158  * helper to process tree block pointer.
7159  *
7160  * when wc->stage == DROP_REFERENCE, this function checks the
7161  * reference count of the block pointed to. if the block is
7162  * shared and we need to update back refs for the subtree rooted
7163  * at the block, this function changes wc->stage to
7164  * UPDATE_BACKREF. if the block is shared and there is no
7165  * need to update back refs, this function simply drops the
7166  * reference to the block.
7167  *
7168  * NOTE: return value 1 means we should stop walking down.
7169  */
7170 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7171                                  struct btrfs_root *root,
7172                                  struct btrfs_path *path,
7173                                  struct walk_control *wc, int *lookup_info)
7174 {
7175         u64 bytenr;
7176         u64 generation;
7177         u64 parent;
7178         u32 blocksize;
7179         struct btrfs_key key;
7180         struct extent_buffer *next;
7181         int level = wc->level;
7182         int reada = 0;
7183         int ret = 0;
7184
7185         generation = btrfs_node_ptr_generation(path->nodes[level],
7186                                                path->slots[level]);
7187         /*
7188          * if the lower level block was created before the snapshot
7189          * was created, we know there is no need to update back refs
7190          * for the subtree
7191          */
7192         if (wc->stage == UPDATE_BACKREF &&
7193             generation <= root->root_key.offset) {
7194                 *lookup_info = 1;
7195                 return 1;
7196         }
7197
7198         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7199         blocksize = btrfs_level_size(root, level - 1);
7200
7201         next = btrfs_find_tree_block(root, bytenr, blocksize);
7202         if (!next) {
7203                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7204                 if (!next)
7205                         return -ENOMEM;
7206                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7207                                                level - 1);
7208                 reada = 1;
7209         }
7210         btrfs_tree_lock(next);
7211         btrfs_set_lock_blocking(next);
7212
7213         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7214                                        &wc->refs[level - 1],
7215                                        &wc->flags[level - 1]);
7216         if (ret < 0) {
7217                 btrfs_tree_unlock(next);
7218                 return ret;
7219         }
7220
7221         if (unlikely(wc->refs[level - 1] == 0)) {
7222                 btrfs_err(root->fs_info, "Missing references.");
7223                 BUG();
7224         }
7225         *lookup_info = 0;
7226
7227         if (wc->stage == DROP_REFERENCE) {
7228                 if (wc->refs[level - 1] > 1) {
7229                         if (level == 1 &&
7230                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7231                                 goto skip;
7232
7233                         if (!wc->update_ref ||
7234                             generation <= root->root_key.offset)
7235                                 goto skip;
7236
7237                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7238                                               path->slots[level]);
7239                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7240                         if (ret < 0)
7241                                 goto skip;
7242
7243                         wc->stage = UPDATE_BACKREF;
7244                         wc->shared_level = level - 1;
7245                 }
7246         } else {
7247                 if (level == 1 &&
7248                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7249                         goto skip;
7250         }
7251
7252         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7253                 btrfs_tree_unlock(next);
7254                 free_extent_buffer(next);
7255                 next = NULL;
7256                 *lookup_info = 1;
7257         }
7258
7259         if (!next) {
7260                 if (reada && level == 1)
7261                         reada_walk_down(trans, root, wc, path);
7262                 next = read_tree_block(root, bytenr, blocksize, generation);
7263                 if (!next || !extent_buffer_uptodate(next)) {
7264                         free_extent_buffer(next);
7265                         return -EIO;
7266                 }
7267                 btrfs_tree_lock(next);
7268                 btrfs_set_lock_blocking(next);
7269         }
7270
7271         level--;
7272         BUG_ON(level != btrfs_header_level(next));
7273         path->nodes[level] = next;
7274         path->slots[level] = 0;
7275         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7276         wc->level = level;
7277         if (wc->level == 1)
7278                 wc->reada_slot = 0;
7279         return 0;
7280 skip:
7281         wc->refs[level - 1] = 0;
7282         wc->flags[level - 1] = 0;
7283         if (wc->stage == DROP_REFERENCE) {
7284                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7285                         parent = path->nodes[level]->start;
7286                 } else {
7287                         BUG_ON(root->root_key.objectid !=
7288                                btrfs_header_owner(path->nodes[level]));
7289                         parent = 0;
7290                 }
7291
7292                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7293                                 root->root_key.objectid, level - 1, 0, 0);
7294                 BUG_ON(ret); /* -ENOMEM */
7295         }
7296         btrfs_tree_unlock(next);
7297         free_extent_buffer(next);
7298         *lookup_info = 1;
7299         return 1;
7300 }
7301
7302 /*
7303  * helper to process tree block while walking up the tree.
7304  *
7305  * when wc->stage == DROP_REFERENCE, this function drops
7306  * reference count on the block.
7307  *
7308  * when wc->stage == UPDATE_BACKREF, this function changes
7309  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7310  * to UPDATE_BACKREF previously while processing the block.
7311  *
7312  * NOTE: return value 1 means we should stop walking up.
7313  */
7314 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7315                                  struct btrfs_root *root,
7316                                  struct btrfs_path *path,
7317                                  struct walk_control *wc)
7318 {
7319         int ret;
7320         int level = wc->level;
7321         struct extent_buffer *eb = path->nodes[level];
7322         u64 parent = 0;
7323
7324         if (wc->stage == UPDATE_BACKREF) {
7325                 BUG_ON(wc->shared_level < level);
7326                 if (level < wc->shared_level)
7327                         goto out;
7328
7329                 ret = find_next_key(path, level + 1, &wc->update_progress);
7330                 if (ret > 0)
7331                         wc->update_ref = 0;
7332
7333                 wc->stage = DROP_REFERENCE;
7334                 wc->shared_level = -1;
7335                 path->slots[level] = 0;
7336
7337                 /*
7338                  * check the reference count again if the block isn't locked.
7339                  * we should start walking down the tree again if the
7340                  * reference count is one.
7341                  */
7342                 if (!path->locks[level]) {
7343                         BUG_ON(level == 0);
7344                         btrfs_tree_lock(eb);
7345                         btrfs_set_lock_blocking(eb);
7346                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7347
7348                         ret = btrfs_lookup_extent_info(trans, root,
7349                                                        eb->start, level, 1,
7350                                                        &wc->refs[level],
7351                                                        &wc->flags[level]);
7352                         if (ret < 0) {
7353                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7354                                 path->locks[level] = 0;
7355                                 return ret;
7356                         }
7357                         BUG_ON(wc->refs[level] == 0);
7358                         if (wc->refs[level] == 1) {
7359                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7360                                 path->locks[level] = 0;
7361                                 return 1;
7362                         }
7363                 }
7364         }
7365
7366         /* wc->stage == DROP_REFERENCE */
7367         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7368
7369         if (wc->refs[level] == 1) {
7370                 if (level == 0) {
7371                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7372                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7373                                                     wc->for_reloc);
7374                         else
7375                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7376                                                     wc->for_reloc);
7377                         BUG_ON(ret); /* -ENOMEM */
7378                 }
7379                 /* make block locked assertion in clean_tree_block happy */
7380                 if (!path->locks[level] &&
7381                     btrfs_header_generation(eb) == trans->transid) {
7382                         btrfs_tree_lock(eb);
7383                         btrfs_set_lock_blocking(eb);
7384                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7385                 }
7386                 clean_tree_block(trans, root, eb);
7387         }
7388
7389         if (eb == root->node) {
7390                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7391                         parent = eb->start;
7392                 else
7393                         BUG_ON(root->root_key.objectid !=
7394                                btrfs_header_owner(eb));
7395         } else {
7396                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7397                         parent = path->nodes[level + 1]->start;
7398                 else
7399                         BUG_ON(root->root_key.objectid !=
7400                                btrfs_header_owner(path->nodes[level + 1]));
7401         }
7402
7403         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7404 out:
7405         wc->refs[level] = 0;
7406         wc->flags[level] = 0;
7407         return 0;
7408 }
7409
7410 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7411                                    struct btrfs_root *root,
7412                                    struct btrfs_path *path,
7413                                    struct walk_control *wc)
7414 {
7415         int level = wc->level;
7416         int lookup_info = 1;
7417         int ret;
7418
7419         while (level >= 0) {
7420                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7421                 if (ret > 0)
7422                         break;
7423
7424                 if (level == 0)
7425                         break;
7426
7427                 if (path->slots[level] >=
7428                     btrfs_header_nritems(path->nodes[level]))
7429                         break;
7430
7431                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7432                 if (ret > 0) {
7433                         path->slots[level]++;
7434                         continue;
7435                 } else if (ret < 0)
7436                         return ret;
7437                 level = wc->level;
7438         }
7439         return 0;
7440 }
7441
7442 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7443                                  struct btrfs_root *root,
7444                                  struct btrfs_path *path,
7445                                  struct walk_control *wc, int max_level)
7446 {
7447         int level = wc->level;
7448         int ret;
7449
7450         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7451         while (level < max_level && path->nodes[level]) {
7452                 wc->level = level;
7453                 if (path->slots[level] + 1 <
7454                     btrfs_header_nritems(path->nodes[level])) {
7455                         path->slots[level]++;
7456                         return 0;
7457                 } else {
7458                         ret = walk_up_proc(trans, root, path, wc);
7459                         if (ret > 0)
7460                                 return 0;
7461
7462                         if (path->locks[level]) {
7463                                 btrfs_tree_unlock_rw(path->nodes[level],
7464                                                      path->locks[level]);
7465                                 path->locks[level] = 0;
7466                         }
7467                         free_extent_buffer(path->nodes[level]);
7468                         path->nodes[level] = NULL;
7469                         level++;
7470                 }
7471         }
7472         return 1;
7473 }
7474
7475 /*
7476  * drop a subvolume tree.
7477  *
7478  * this function traverses the tree freeing any blocks that are only
7479  * referenced by the tree.
7480  *
7481  * when a shared tree block is found, this function decreases its
7482  * reference count by one. if update_ref is true, this function
7483  * also makes sure backrefs for the shared block and all lower level
7484  * blocks are properly updated.
7485  *
7486  * If called with for_reloc == 0, may exit early with -EAGAIN
7487  */
7488 int btrfs_drop_snapshot(struct btrfs_root *root,
7489                          struct btrfs_block_rsv *block_rsv, int update_ref,
7490                          int for_reloc)
7491 {
7492         struct btrfs_path *path;
7493         struct btrfs_trans_handle *trans;
7494         struct btrfs_root *tree_root = root->fs_info->tree_root;
7495         struct btrfs_root_item *root_item = &root->root_item;
7496         struct walk_control *wc;
7497         struct btrfs_key key;
7498         int err = 0;
7499         int ret;
7500         int level;
7501         bool root_dropped = false;
7502
7503         path = btrfs_alloc_path();
7504         if (!path) {
7505                 err = -ENOMEM;
7506                 goto out;
7507         }
7508
7509         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7510         if (!wc) {
7511                 btrfs_free_path(path);
7512                 err = -ENOMEM;
7513                 goto out;
7514         }
7515
7516         trans = btrfs_start_transaction(tree_root, 0);
7517         if (IS_ERR(trans)) {
7518                 err = PTR_ERR(trans);
7519                 goto out_free;
7520         }
7521
7522         if (block_rsv)
7523                 trans->block_rsv = block_rsv;
7524
7525         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7526                 level = btrfs_header_level(root->node);
7527                 path->nodes[level] = btrfs_lock_root_node(root);
7528                 btrfs_set_lock_blocking(path->nodes[level]);
7529                 path->slots[level] = 0;
7530                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7531                 memset(&wc->update_progress, 0,
7532                        sizeof(wc->update_progress));
7533         } else {
7534                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7535                 memcpy(&wc->update_progress, &key,
7536                        sizeof(wc->update_progress));
7537
7538                 level = root_item->drop_level;
7539                 BUG_ON(level == 0);
7540                 path->lowest_level = level;
7541                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7542                 path->lowest_level = 0;
7543                 if (ret < 0) {
7544                         err = ret;
7545                         goto out_end_trans;
7546                 }
7547                 WARN_ON(ret > 0);
7548
7549                 /*
7550                  * unlock our path, this is safe because only this
7551                  * function is allowed to delete this snapshot
7552                  */
7553                 btrfs_unlock_up_safe(path, 0);
7554
7555                 level = btrfs_header_level(root->node);
7556                 while (1) {
7557                         btrfs_tree_lock(path->nodes[level]);
7558                         btrfs_set_lock_blocking(path->nodes[level]);
7559                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7560
7561                         ret = btrfs_lookup_extent_info(trans, root,
7562                                                 path->nodes[level]->start,
7563                                                 level, 1, &wc->refs[level],
7564                                                 &wc->flags[level]);
7565                         if (ret < 0) {
7566                                 err = ret;
7567                                 goto out_end_trans;
7568                         }
7569                         BUG_ON(wc->refs[level] == 0);
7570
7571                         if (level == root_item->drop_level)
7572                                 break;
7573
7574                         btrfs_tree_unlock(path->nodes[level]);
7575                         path->locks[level] = 0;
7576                         WARN_ON(wc->refs[level] != 1);
7577                         level--;
7578                 }
7579         }
7580
7581         wc->level = level;
7582         wc->shared_level = -1;
7583         wc->stage = DROP_REFERENCE;
7584         wc->update_ref = update_ref;
7585         wc->keep_locks = 0;
7586         wc->for_reloc = for_reloc;
7587         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7588
7589         while (1) {
7590
7591                 ret = walk_down_tree(trans, root, path, wc);
7592                 if (ret < 0) {
7593                         err = ret;
7594                         break;
7595                 }
7596
7597                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7598                 if (ret < 0) {
7599                         err = ret;
7600                         break;
7601                 }
7602
7603                 if (ret > 0) {
7604                         BUG_ON(wc->stage != DROP_REFERENCE);
7605                         break;
7606                 }
7607
7608                 if (wc->stage == DROP_REFERENCE) {
7609                         level = wc->level;
7610                         btrfs_node_key(path->nodes[level],
7611                                        &root_item->drop_progress,
7612                                        path->slots[level]);
7613                         root_item->drop_level = level;
7614                 }
7615
7616                 BUG_ON(wc->level == 0);
7617                 if (btrfs_should_end_transaction(trans, tree_root) ||
7618                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7619                         ret = btrfs_update_root(trans, tree_root,
7620                                                 &root->root_key,
7621                                                 root_item);
7622                         if (ret) {
7623                                 btrfs_abort_transaction(trans, tree_root, ret);
7624                                 err = ret;
7625                                 goto out_end_trans;
7626                         }
7627
7628                         btrfs_end_transaction_throttle(trans, tree_root);
7629                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7630                                 pr_debug("btrfs: drop snapshot early exit\n");
7631                                 err = -EAGAIN;
7632                                 goto out_free;
7633                         }
7634
7635                         trans = btrfs_start_transaction(tree_root, 0);
7636                         if (IS_ERR(trans)) {
7637                                 err = PTR_ERR(trans);
7638                                 goto out_free;
7639                         }
7640                         if (block_rsv)
7641                                 trans->block_rsv = block_rsv;
7642                 }
7643         }
7644         btrfs_release_path(path);
7645         if (err)
7646                 goto out_end_trans;
7647
7648         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7649         if (ret) {
7650                 btrfs_abort_transaction(trans, tree_root, ret);
7651                 goto out_end_trans;
7652         }
7653
7654         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7655                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7656                                       NULL, NULL);
7657                 if (ret < 0) {
7658                         btrfs_abort_transaction(trans, tree_root, ret);
7659                         err = ret;
7660                         goto out_end_trans;
7661                 } else if (ret > 0) {
7662                         /* if we fail to delete the orphan item this time
7663                          * around, it'll get picked up the next time.
7664                          *
7665                          * The most common failure here is just -ENOENT.
7666                          */
7667                         btrfs_del_orphan_item(trans, tree_root,
7668                                               root->root_key.objectid);
7669                 }
7670         }
7671
7672         if (root->in_radix) {
7673                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7674         } else {
7675                 free_extent_buffer(root->node);
7676                 free_extent_buffer(root->commit_root);
7677                 btrfs_put_fs_root(root);
7678         }
7679         root_dropped = true;
7680 out_end_trans:
7681         btrfs_end_transaction_throttle(trans, tree_root);
7682 out_free:
7683         kfree(wc);
7684         btrfs_free_path(path);
7685 out:
7686         /*
7687          * If we had to stop dropping the snapshot for whatever reason, we
7688          * need to add it back to the dead root list so that we keep trying
7689          * to do the work later.  This also cleans up roots we don't have in
7690          * the radix tree (like when we recover after a power failure or an
7691          * unmount), so we don't leak memory.
7692          */
7693         if (!root_dropped)
7694                 btrfs_add_dead_root(root);
7695         if (err)
7696                 btrfs_std_error(root->fs_info, err);
7697         return err;
7698 }
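/*
 * [Editor's sketch] the resume scheme btrfs_drop_snapshot() above relies
 * on: drop_progress/drop_level in the root item record the last node key
 * processed, so a drop interrupted by -EAGAIN or a transaction end
 * restarts from that key instead of from the tree root (a zero objectid
 * means a fresh drop).  In miniature, with hypothetical types:
 */
struct demo_drop_progress {
        unsigned long long objectid;    /* 0 means "not started" */
        int level;
};

static void demo_save_progress(struct demo_drop_progress *p,
                               unsigned long long key_objectid, int level)
{
        /* persisted with the root item before the transaction ends */
        p->objectid = key_objectid;
        p->level = level;
}

static int demo_is_resume(const struct demo_drop_progress *p)
{
        return p->objectid != 0;
}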
7699
7700 /*
7701  * drop subtree rooted at tree block 'node'.
7702  *
7703  * NOTE: this function will unlock and release tree block 'node'
7704  * only used by relocation code
7705  */
7706 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7707                         struct btrfs_root *root,
7708                         struct extent_buffer *node,
7709                         struct extent_buffer *parent)
7710 {
7711         struct btrfs_path *path;
7712         struct walk_control *wc;
7713         int level;
7714         int parent_level;
7715         int ret = 0;
7716         int wret;
7717
7718         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7719
7720         path = btrfs_alloc_path();
7721         if (!path)
7722                 return -ENOMEM;
7723
7724         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7725         if (!wc) {
7726                 btrfs_free_path(path);
7727                 return -ENOMEM;
7728         }
7729
7730         btrfs_assert_tree_locked(parent);
7731         parent_level = btrfs_header_level(parent);
7732         extent_buffer_get(parent);
7733         path->nodes[parent_level] = parent;
7734         path->slots[parent_level] = btrfs_header_nritems(parent);
7735
7736         btrfs_assert_tree_locked(node);
7737         level = btrfs_header_level(node);
7738         path->nodes[level] = node;
7739         path->slots[level] = 0;
7740         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7741
7742         wc->refs[parent_level] = 1;
7743         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7744         wc->level = level;
7745         wc->shared_level = -1;
7746         wc->stage = DROP_REFERENCE;
7747         wc->update_ref = 0;
7748         wc->keep_locks = 1;
7749         wc->for_reloc = 1;
7750         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7751
7752         while (1) {
7753                 wret = walk_down_tree(trans, root, path, wc);
7754                 if (wret < 0) {
7755                         ret = wret;
7756                         break;
7757                 }
7758
7759                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7760                 if (wret < 0)
7761                         ret = wret;
7762                 if (wret != 0)
7763                         break;
7764         }
7765
7766         kfree(wc);
7767         btrfs_free_path(path);
7768         return ret;
7769 }
7770
7771 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7772 {
7773         u64 num_devices;
7774         u64 stripped;
7775
7776         /*
7777          * if restripe is on for this chunk type, pick the target
7778          * profile and return; otherwise do the usual balance
7779          */
7780         stripped = get_restripe_target(root->fs_info, flags);
7781         if (stripped)
7782                 return extended_to_chunk(stripped);
7783
7784         /*
7785          * we add in the count of missing devices because we want
7786          * to make sure that any RAID levels on a degraded FS
7787          * continue to be honored.
7788          */
7789         num_devices = root->fs_info->fs_devices->rw_devices +
7790                 root->fs_info->fs_devices->missing_devices;
7791
7792         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7793                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7794                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7795
7796         if (num_devices == 1) {
7797                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7798                 stripped = flags & ~stripped;
7799
7800                 /* turn raid0 into single device chunks */
7801                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7802                         return stripped;
7803
7804                 /* turn mirroring into duplication */
7805                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7806                              BTRFS_BLOCK_GROUP_RAID10))
7807                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7808         } else {
7809                 /* they already had raid on here, just return */
7810                 if (flags & stripped)
7811                         return flags;
7812
7813                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7814                 stripped = flags & ~stripped;
7815
7816                 /* switch duplicated blocks back to raid1 */
7817                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7818                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7819
7820                 /* this is drive concat, leave it alone */
7821         }
7822
7823         return flags;
7824 }
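/*
 * [Editor's sketch] the degradation table update_block_group_flags()
 * above encodes: on a single usable device RAID0 collapses to single and
 * RAID1/RAID10 collapse to DUP; with several devices DUP is promoted
 * back to RAID1 and existing RAID profiles are kept.  With hypothetical
 * flag bits:
 */
#define DEMO_RAID0      (1U << 0)
#define DEMO_RAID1      (1U << 1)
#define DEMO_DUP        (1U << 2)
#define DEMO_RAID10     (1U << 3)

static unsigned int demo_reduce_profile(unsigned int flags,
                                        unsigned long long num_devices)
{
        if (num_devices == 1) {
                if (flags & DEMO_RAID0)
                        return flags & ~DEMO_RAID0;     /* -> single */
                if (flags & (DEMO_RAID1 | DEMO_RAID10))
                        return (flags & ~(DEMO_RAID1 | DEMO_RAID10)) |
                               DEMO_DUP;
        } else if (flags & DEMO_DUP) {
                return (flags & ~DEMO_DUP) | DEMO_RAID1;
        }
        return flags;
}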
7825
7826 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7827 {
7828         struct btrfs_space_info *sinfo = cache->space_info;
7829         u64 num_bytes;
7830         u64 min_allocable_bytes;
7831         int ret = -ENOSPC;
7832
7833
7834         /*
7835          * We need some metadata space and system metadata space for
7836          * allocating chunks in some corner cases, so keep a minimum
7837          * allocable amount unless the caller forces read-only.
7838          */
7839         if ((sinfo->flags &
7840              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7841             !force)
7842                 min_allocable_bytes = 1 * 1024 * 1024;
7843         else
7844                 min_allocable_bytes = 0;
7845
7846         spin_lock(&sinfo->lock);
7847         spin_lock(&cache->lock);
7848
7849         if (cache->ro) {
7850                 ret = 0;
7851                 goto out;
7852         }
7853
7854         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7855                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7856
7857         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7858             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7859             min_allocable_bytes <= sinfo->total_bytes) {
7860                 sinfo->bytes_readonly += num_bytes;
7861                 cache->ro = 1;
7862                 ret = 0;
7863         }
7864 out:
7865         spin_unlock(&cache->lock);
7866         spin_unlock(&sinfo->lock);
7867         return ret;
7868 }
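/*
 * [Editor's sketch] the admission check in set_block_group_ro() above:
 * a group may go read-only only if moving its unused bytes into
 * bytes_readonly still leaves the space_info within total_bytes.  The
 * inequality, isolated (field names mirror the real counters but the
 * struct is hypothetical):
 */
struct demo_space_info {
        unsigned long long used, reserved, pinned, may_use, readonly;
        unsigned long long total;
};

static int demo_can_set_ro(const struct demo_space_info *s,
                           unsigned long long unused_in_group,
                           unsigned long long min_allocable)
{
        return s->used + s->reserved + s->pinned + s->may_use +
               s->readonly + unused_in_group + min_allocable <= s->total;
}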
7869
7870 int btrfs_set_block_group_ro(struct btrfs_root *root,
7871                              struct btrfs_block_group_cache *cache)
7872
7873 {
7874         struct btrfs_trans_handle *trans;
7875         u64 alloc_flags;
7876         int ret;
7877
7878         BUG_ON(cache->ro);
7879
7880         trans = btrfs_join_transaction(root);
7881         if (IS_ERR(trans))
7882                 return PTR_ERR(trans);
7883
7884         alloc_flags = update_block_group_flags(root, cache->flags);
7885         if (alloc_flags != cache->flags) {
7886                 ret = do_chunk_alloc(trans, root, alloc_flags,
7887                                      CHUNK_ALLOC_FORCE);
7888                 if (ret < 0)
7889                         goto out;
7890         }
7891
7892         ret = set_block_group_ro(cache, 0);
7893         if (!ret)
7894                 goto out;
7895         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7896         ret = do_chunk_alloc(trans, root, alloc_flags,
7897                              CHUNK_ALLOC_FORCE);
7898         if (ret < 0)
7899                 goto out;
7900         ret = set_block_group_ro(cache, 0);
7901 out:
7902         btrfs_end_transaction(trans, root);
7903         return ret;
7904 }
7905
7906 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7907                             struct btrfs_root *root, u64 type)
7908 {
7909         u64 alloc_flags = get_alloc_profile(root, type);
7910         return do_chunk_alloc(trans, root, alloc_flags,
7911                               CHUNK_ALLOC_FORCE);
7912 }
7913
7914 /*
7915  * helper to account the unused space of all the readonly block groups in
7916  * the list. takes mirrors into account.
7917  */
7918 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7919 {
7920         struct btrfs_block_group_cache *block_group;
7921         u64 free_bytes = 0;
7922         int factor;
7923
7924         list_for_each_entry(block_group, groups_list, list) {
7925                 spin_lock(&block_group->lock);
7926
7927                 if (!block_group->ro) {
7928                         spin_unlock(&block_group->lock);
7929                         continue;
7930                 }
7931
7932                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7933                                           BTRFS_BLOCK_GROUP_RAID10 |
7934                                           BTRFS_BLOCK_GROUP_DUP))
7935                         factor = 2;
7936                 else
7937                         factor = 1;
7938
7939                 free_bytes += (block_group->key.offset -
7940                                btrfs_block_group_used(&block_group->item)) *
7941                                factor;
7942
7943                 spin_unlock(&block_group->lock);
7944         }
7945
7946         return free_bytes;
7947 }
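/*
 * [Editor's sketch] why the factor above is 2 for RAID1/RAID10/DUP:
 * those profiles keep two copies of every byte, so each unused byte in a
 * read-only group of that kind represents two raw bytes of device space.
 * e.g. 1GiB unused in a RAID1 group frees 2GiB across the disks.
 */
static unsigned long long demo_ro_free_bytes(unsigned long long unused,
                                             int two_copies)
{
        return unused * (two_copies ? 2 : 1);
}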
7948
7949 /*
7950  * helper to account the unused space of all the readonly block groups in
7951  * the space_info. takes mirrors into account.
7952  */
7953 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7954 {
7955         int i;
7956         u64 free_bytes = 0;
7957
7958         spin_lock(&sinfo->lock);
7959
7960         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7961                 if (!list_empty(&sinfo->block_groups[i]))
7962                         free_bytes += __btrfs_get_ro_block_group_free_space(
7963                                                 &sinfo->block_groups[i]);
7964
7965         spin_unlock(&sinfo->lock);
7966
7967         return free_bytes;
7968 }
7969
7970 void btrfs_set_block_group_rw(struct btrfs_root *root,
7971                               struct btrfs_block_group_cache *cache)
7972 {
7973         struct btrfs_space_info *sinfo = cache->space_info;
7974         u64 num_bytes;
7975
7976         BUG_ON(!cache->ro);
7977
7978         spin_lock(&sinfo->lock);
7979         spin_lock(&cache->lock);
7980         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7981                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7982         sinfo->bytes_readonly -= num_bytes;
7983         cache->ro = 0;
7984         spin_unlock(&cache->lock);
7985         spin_unlock(&sinfo->lock);
7986 }
7987
7988 /*
7989  * checks to see if it's even possible to relocate this block group.
7990  *
7991  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7992  * it's ok to go ahead and try.
7993  */
7994 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7995 {
7996         struct btrfs_block_group_cache *block_group;
7997         struct btrfs_space_info *space_info;
7998         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7999         struct btrfs_device *device;
8000         struct btrfs_trans_handle *trans;
8001         u64 min_free;
8002         u64 dev_min = 1;
8003         u64 dev_nr = 0;
8004         u64 target;
8005         int index;
8006         int full = 0;
8007         int ret = 0;
8008
8009         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8010
8011         /* odd, couldn't find the block group, leave it alone */
8012         if (!block_group)
8013                 return -1;
8014
8015         min_free = btrfs_block_group_used(&block_group->item);
8016
8017         /* no bytes used, we're good */
8018         if (!min_free)
8019                 goto out;
8020
8021         space_info = block_group->space_info;
8022         spin_lock(&space_info->lock);
8023
8024         full = space_info->full;
8025
8026         /*
8027          * if this is the last block group we have in this space, we can't
8028          * relocate it unless we're able to allocate a new chunk below.
8029          *
8030          * Otherwise, we need to make sure we have room in the space to handle
8031          * all of the extents from this block group.  If we can, we're good
8032          */
8033         if ((space_info->total_bytes != block_group->key.offset) &&
8034             (space_info->bytes_used + space_info->bytes_reserved +
8035              space_info->bytes_pinned + space_info->bytes_readonly +
8036              min_free < space_info->total_bytes)) {
8037                 spin_unlock(&space_info->lock);
8038                 goto out;
8039         }
8040         spin_unlock(&space_info->lock);
8041
8042         /*
8043          * ok we don't have enough space, but maybe we have free space on our
8044          * devices to allocate new chunks for relocation, so loop through our
8045          * alloc devices and guess if we have enough space.  if this block
8046          * group is going to be restriped, run checks against the target
8047          * profile instead of the current one.
8048          */
8049         ret = -1;
8050
8051         /*
8052          * index:
8053          *      0: raid10
8054          *      1: raid1
8055          *      2: dup
8056          *      3: raid0
8057          *      4: single (raid5 and raid6 follow as 5 and 6)
8058          */
8059         target = get_restripe_target(root->fs_info, block_group->flags);
8060         if (target) {
8061                 index = __get_raid_index(extended_to_chunk(target));
8062         } else {
8063                 /*
8064                  * this is just a balance, so if we were marked as full
8065                  * we know there is no space for a new chunk
8066                  */
8067                 if (full)
8068                         goto out;
8069
8070                 index = get_block_group_index(block_group);
8071         }
8072
8073         if (index == BTRFS_RAID_RAID10) {
8074                 dev_min = 4;
8075                 /* Divide by 2 */
8076                 min_free >>= 1;
8077         } else if (index == BTRFS_RAID_RAID1) {
8078                 dev_min = 2;
8079         } else if (index == BTRFS_RAID_DUP) {
8080                 /* Multiply by 2 */
8081                 min_free <<= 1;
8082         } else if (index == BTRFS_RAID_RAID0) {
8083                 dev_min = fs_devices->rw_devices;
8084                 do_div(min_free, dev_min);
8085         }
8086
8087         /* We need to do this so that we can look at pending chunks */
8088         trans = btrfs_join_transaction(root);
8089         if (IS_ERR(trans)) {
8090                 ret = PTR_ERR(trans);
8091                 goto out;
8092         }
8093
8094         mutex_lock(&root->fs_info->chunk_mutex);
8095         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8096                 u64 dev_offset;
8097
8098                 /*
8099                  * check to make sure we can actually find a chunk with enough
8100                  * space to fit our block group in.
8101                  */
8102                 if (device->total_bytes > device->bytes_used + min_free &&
8103                     !device->is_tgtdev_for_dev_replace) {
8104                         ret = find_free_dev_extent(trans, device, min_free,
8105                                                    &dev_offset, NULL);
8106                         if (!ret)
8107                                 dev_nr++;
8108
8109                         if (dev_nr >= dev_min)
8110                                 break;
8111
8112                         ret = -1;
8113                 }
8114         }
8115         mutex_unlock(&root->fs_info->chunk_mutex);
8116         btrfs_end_transaction(trans, root);
8117 out:
8118         btrfs_put_block_group(block_group);
8119         return ret;
8120 }
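
btrfs_can_relocate() folds the per-profile geometry into two knobs, min_free and dev_min, in the if/else ladder above. The same scaling, sketched standalone under illustrative names (the enum order mirrors the raid index table in the function):

#include <stdint.h>

enum ex_raid { EX_RAID10, EX_RAID1, EX_DUP, EX_RAID0, EX_SINGLE };

struct ex_reloc_need {
	uint64_t min_free;	/* bytes each candidate device must offer */
	uint64_t dev_min;	/* how many devices must offer them */
};

/*
 * RAID10 mirrors striped pairs across at least 4 devices, so each one
 * needs only half the used bytes; RAID1 needs two full-size homes;
 * DUP puts both copies on a single device, doubling its need; RAID0
 * spreads the bytes evenly over every writable device.
 */
static struct ex_reloc_need ex_relocation_need(enum ex_raid index,
					       uint64_t used,
					       uint64_t rw_devices)
{
	struct ex_reloc_need need = { .min_free = used, .dev_min = 1 };

	switch (index) {
	case EX_RAID10:
		need.dev_min = 4;
		need.min_free >>= 1;
		break;
	case EX_RAID1:
		need.dev_min = 2;
		break;
	case EX_DUP:
		need.min_free <<= 1;
		break;
	case EX_RAID0:
		need.dev_min = rw_devices;
		need.min_free /= rw_devices;
		break;
	default:
		break;
	}
	return need;
}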
8121
8122 static int find_first_block_group(struct btrfs_root *root,
8123                 struct btrfs_path *path, struct btrfs_key *key)
8124 {
8125         int ret = 0;
8126         struct btrfs_key found_key;
8127         struct extent_buffer *leaf;
8128         int slot;
8129
8130         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8131         if (ret < 0)
8132                 goto out;
8133
8134         while (1) {
8135                 slot = path->slots[0];
8136                 leaf = path->nodes[0];
8137                 if (slot >= btrfs_header_nritems(leaf)) {
8138                         ret = btrfs_next_leaf(root, path);
8139                         if (ret == 0)
8140                                 continue;
8141                         if (ret < 0)
8142                                 goto out;
8143                         break;
8144                 }
8145                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8146
8147                 if (found_key.objectid >= key->objectid &&
8148                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8149                         ret = 0;
8150                         goto out;
8151                 }
8152                 path->slots[0]++;
8153         }
8154 out:
8155         return ret;
8156 }
8157
8158 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8159 {
8160         struct btrfs_block_group_cache *block_group;
8161         u64 last = 0;
8162
8163         while (1) {
8164                 struct inode *inode;
8165
8166                 block_group = btrfs_lookup_first_block_group(info, last);
8167                 while (block_group) {
8168                         spin_lock(&block_group->lock);
8169                         if (block_group->iref)
8170                                 break;
8171                         spin_unlock(&block_group->lock);
8172                         block_group = next_block_group(info->tree_root,
8173                                                        block_group);
8174                 }
8175                 if (!block_group) {
8176                         if (last == 0)
8177                                 break;
8178                         last = 0;
8179                         continue;
8180                 }
8181
8182                 inode = block_group->inode;
8183                 block_group->iref = 0;
8184                 block_group->inode = NULL;
8185                 spin_unlock(&block_group->lock);
8186                 iput(inode);
8187                 last = block_group->key.objectid + block_group->key.offset;
8188                 btrfs_put_block_group(block_group);
8189         }
8190 }
8191
8192 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8193 {
8194         struct btrfs_block_group_cache *block_group;
8195         struct btrfs_space_info *space_info;
8196         struct btrfs_caching_control *caching_ctl;
8197         struct rb_node *n;
8198
8199         down_write(&info->extent_commit_sem);
8200         while (!list_empty(&info->caching_block_groups)) {
8201                 caching_ctl = list_entry(info->caching_block_groups.next,
8202                                          struct btrfs_caching_control, list);
8203                 list_del(&caching_ctl->list);
8204                 put_caching_control(caching_ctl);
8205         }
8206         up_write(&info->extent_commit_sem);
8207
8208         spin_lock(&info->block_group_cache_lock);
8209         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8210                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8211                                        cache_node);
8212                 rb_erase(&block_group->cache_node,
8213                          &info->block_group_cache_tree);
8214                 spin_unlock(&info->block_group_cache_lock);
8215
8216                 down_write(&block_group->space_info->groups_sem);
8217                 list_del(&block_group->list);
8218                 up_write(&block_group->space_info->groups_sem);
8219
8220                 if (block_group->cached == BTRFS_CACHE_STARTED)
8221                         wait_block_group_cache_done(block_group);
8222
8223                 /*
8224                  * We haven't cached this block group, which means we could
8225                  * possibly have excluded extents on this block group.
8226                  */
8227                 if (block_group->cached == BTRFS_CACHE_NO)
8228                         free_excluded_extents(info->extent_root, block_group);
8229
8230                 btrfs_remove_free_space_cache(block_group);
8231                 btrfs_put_block_group(block_group);
8232
8233                 spin_lock(&info->block_group_cache_lock);
8234         }
8235         spin_unlock(&info->block_group_cache_lock);
8236
8237         /* now that all the block groups are freed, go through and
8238          * free all the space_info structs.  This is only called during
8239          * the final stages of unmount, and so we know nobody is
8240          * using them.  We call synchronize_rcu() once before we start,
8241          * just to be on the safe side.
8242          */
8243         synchronize_rcu();
8244
8245         release_global_block_rsv(info);
8246
8247         while (!list_empty(&info->space_info)) {
8248                 space_info = list_entry(info->space_info.next,
8249                                         struct btrfs_space_info,
8250                                         list);
8251                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8252                         if (space_info->bytes_pinned > 0 ||
8253                             space_info->bytes_reserved > 0 ||
8254                             space_info->bytes_may_use > 0) {
8255                                 WARN_ON(1);
8256                                 dump_space_info(space_info, 0, 0);
8257                         }
8258                 }
8259                 percpu_counter_destroy(&space_info->total_bytes_pinned);
8260                 list_del(&space_info->list);
8261                 kfree(space_info);
8262         }
8263         return 0;
8264 }
8265
8266 static void __link_block_group(struct btrfs_space_info *space_info,
8267                                struct btrfs_block_group_cache *cache)
8268 {
8269         int index = get_block_group_index(cache);
8270
8271         down_write(&space_info->groups_sem);
8272         list_add_tail(&cache->list, &space_info->block_groups[index]);
8273         up_write(&space_info->groups_sem);
8274 }
8275
8276 int btrfs_read_block_groups(struct btrfs_root *root)
8277 {
8278         struct btrfs_path *path;
8279         int ret;
8280         struct btrfs_block_group_cache *cache;
8281         struct btrfs_fs_info *info = root->fs_info;
8282         struct btrfs_space_info *space_info;
8283         struct btrfs_key key;
8284         struct btrfs_key found_key;
8285         struct extent_buffer *leaf;
8286         int need_clear = 0;
8287         u64 cache_gen;
8288
8289         root = info->extent_root;
8290         key.objectid = 0;
8291         key.offset = 0;
8292         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8293         path = btrfs_alloc_path();
8294         if (!path)
8295                 return -ENOMEM;
8296         path->reada = 1;
8297
8298         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8299         if (btrfs_test_opt(root, SPACE_CACHE) &&
8300             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8301                 need_clear = 1;
8302         if (btrfs_test_opt(root, CLEAR_CACHE))
8303                 need_clear = 1;
8304
8305         while (1) {
8306                 ret = find_first_block_group(root, path, &key);
8307                 if (ret > 0)
8308                         break;
8309                 if (ret != 0)
8310                         goto error;
8311                 leaf = path->nodes[0];
8312                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8313                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8314                 if (!cache) {
8315                         ret = -ENOMEM;
8316                         goto error;
8317                 }
8318                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8319                                                 GFP_NOFS);
8320                 if (!cache->free_space_ctl) {
8321                         kfree(cache);
8322                         ret = -ENOMEM;
8323                         goto error;
8324                 }
8325
8326                 atomic_set(&cache->count, 1);
8327                 spin_lock_init(&cache->lock);
8328                 cache->fs_info = info;
8329                 INIT_LIST_HEAD(&cache->list);
8330                 INIT_LIST_HEAD(&cache->cluster_list);
8331
8332                 if (need_clear) {
8333                         /*
8334                          * When we mount with an old space cache, we need to
8335                          * set BTRFS_DC_CLEAR and set the dirty flag.
8336                          *
8337                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8338                          *    truncate the old free space cache inode and
8339                          *    set up a new one.
8340                          * b) Setting the 'dirty' flag makes sure that we flush
8341                          *    the new space cache info onto disk.
8342                          */
8343                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8344                         if (btrfs_test_opt(root, SPACE_CACHE))
8345                                 cache->dirty = 1;
8346                 }
8347
8348                 read_extent_buffer(leaf, &cache->item,
8349                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8350                                    sizeof(cache->item));
8351                 memcpy(&cache->key, &found_key, sizeof(found_key));
8352
8353                 key.objectid = found_key.objectid + found_key.offset;
8354                 btrfs_release_path(path);
8355                 cache->flags = btrfs_block_group_flags(&cache->item);
8356                 cache->sectorsize = root->sectorsize;
8357                 cache->full_stripe_len = btrfs_full_stripe_len(root,
8358                                                &root->fs_info->mapping_tree,
8359                                                found_key.objectid);
8360                 btrfs_init_free_space_ctl(cache);
8361
8362                 /*
8363                  * We need to exclude the super stripes now so that the space
8364                  * info has super bytes accounted for, otherwise we'll think
8365                  * we have more space than we actually do.
8366                  */
8367                 ret = exclude_super_stripes(root, cache);
8368                 if (ret) {
8369                         /*
8370                          * We may have excluded something, so call this just in
8371                          * case.
8372                          */
8373                         free_excluded_extents(root, cache);
8374                         kfree(cache->free_space_ctl);
8375                         kfree(cache);
8376                         goto error;
8377                 }
8378
8379                 /*
8380                  * check for two cases, either we are full, and therefore
8381                  * don't need to bother with the caching work since we won't
8382                  * find any space, or we are empty, and we can just add all
8383                  * the space in and be done with it.  This saves us a lot of
8384                  * time, particularly in the full case.
8385                  */
8386                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8387                         cache->last_byte_to_unpin = (u64)-1;
8388                         cache->cached = BTRFS_CACHE_FINISHED;
8389                         free_excluded_extents(root, cache);
8390                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8391                         cache->last_byte_to_unpin = (u64)-1;
8392                         cache->cached = BTRFS_CACHE_FINISHED;
8393                         add_new_free_space(cache, root->fs_info,
8394                                            found_key.objectid,
8395                                            found_key.objectid +
8396                                            found_key.offset);
8397                         free_excluded_extents(root, cache);
8398                 }
8399
8400                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8401                 if (ret) {
8402                         btrfs_remove_free_space_cache(cache);
8403                         btrfs_put_block_group(cache);
8404                         goto error;
8405                 }
8406
8407                 ret = update_space_info(info, cache->flags, found_key.offset,
8408                                         btrfs_block_group_used(&cache->item),
8409                                         &space_info);
8410                 if (ret) {
8411                         btrfs_remove_free_space_cache(cache);
8412                         spin_lock(&info->block_group_cache_lock);
8413                         rb_erase(&cache->cache_node,
8414                                  &info->block_group_cache_tree);
8415                         spin_unlock(&info->block_group_cache_lock);
8416                         btrfs_put_block_group(cache);
8417                         goto error;
8418                 }
8419
8420                 cache->space_info = space_info;
8421                 spin_lock(&cache->space_info->lock);
8422                 cache->space_info->bytes_readonly += cache->bytes_super;
8423                 spin_unlock(&cache->space_info->lock);
8424
8425                 __link_block_group(space_info, cache);
8426
8427                 set_avail_alloc_bits(root->fs_info, cache->flags);
8428                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8429                         set_block_group_ro(cache, 1);
8430         }
8431
8432         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8433                 if (!(get_alloc_profile(root, space_info->flags) &
8434                       (BTRFS_BLOCK_GROUP_RAID10 |
8435                        BTRFS_BLOCK_GROUP_RAID1 |
8436                        BTRFS_BLOCK_GROUP_RAID5 |
8437                        BTRFS_BLOCK_GROUP_RAID6 |
8438                        BTRFS_BLOCK_GROUP_DUP)))
8439                         continue;
8440                 /*
8441                  * avoid allocating from un-mirrored block groups if there are
8442                  * mirrored block groups.
8443                  */
8444                 list_for_each_entry(cache, &space_info->block_groups[3], list)
8445                         set_block_group_ro(cache, 1);
8446                 list_for_each_entry(cache, &space_info->block_groups[4], list)
8447                         set_block_group_ro(cache, 1);
8448         }
8449
8450         init_global_block_rsv(info);
8451         ret = 0;
8452 error:
8453         btrfs_free_path(path);
8454         return ret;
8455 }
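
Two branches in the loop above short-circuit the caching machinery: a completely full block group has no free space to find, and a completely empty one is a single free extent covering its whole range. That decision, isolated as a sketch with illustrative names:

#include <stdint.h>

enum ex_cache_state { EX_CACHE_NO, EX_CACHE_FINISHED };

/* returns the initial cache state and, via *free_len, any free span to seed */
static enum ex_cache_state ex_initial_cache_state(uint64_t size, uint64_t used,
						  uint64_t *free_len)
{
	if (used == size) {		/* full: nothing to cache */
		*free_len = 0;
		return EX_CACHE_FINISHED;
	}
	if (used == 0) {		/* empty: the whole range is free */
		*free_len = size;
		return EX_CACHE_FINISHED;
	}
	*free_len = 0;
	return EX_CACHE_NO;		/* partially used: cache lazily later */
}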
8456
8457 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8458                                        struct btrfs_root *root)
8459 {
8460         struct btrfs_block_group_cache *block_group, *tmp;
8461         struct btrfs_root *extent_root = root->fs_info->extent_root;
8462         struct btrfs_block_group_item item;
8463         struct btrfs_key key;
8464         int ret = 0;
8465
8466         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8467                                  new_bg_list) {
8468                 list_del_init(&block_group->new_bg_list);
8469
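                /*
                 * once the transaction has been aborted we keep draining
                 * new_bgs, but skip any further tree updates
                 */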
8470                 if (ret)
8471                         continue;
8472
8473                 spin_lock(&block_group->lock);
8474                 memcpy(&item, &block_group->item, sizeof(item));
8475                 memcpy(&key, &block_group->key, sizeof(key));
8476                 spin_unlock(&block_group->lock);
8477
8478                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8479                                         sizeof(item));
8480                 if (ret)
8481                         btrfs_abort_transaction(trans, extent_root, ret);
8482                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8483                                                key.objectid, key.offset);
8484                 if (ret)
8485                         btrfs_abort_transaction(trans, extent_root, ret);
8486         }
8487 }
8488
8489 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8490                            struct btrfs_root *root, u64 bytes_used,
8491                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8492                            u64 size)
8493 {
8494         int ret;
8495         struct btrfs_root *extent_root;
8496         struct btrfs_block_group_cache *cache;
8497
8498         extent_root = root->fs_info->extent_root;
8499
8500         root->fs_info->last_trans_log_full_commit = trans->transid;
8501
8502         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8503         if (!cache)
8504                 return -ENOMEM;
8505         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8506                                         GFP_NOFS);
8507         if (!cache->free_space_ctl) {
8508                 kfree(cache);
8509                 return -ENOMEM;
8510         }
8511
8512         cache->key.objectid = chunk_offset;
8513         cache->key.offset = size;
8514         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8515         cache->sectorsize = root->sectorsize;
8516         cache->fs_info = root->fs_info;
8517         cache->full_stripe_len = btrfs_full_stripe_len(root,
8518                                                &root->fs_info->mapping_tree,
8519                                                chunk_offset);
8520
8521         atomic_set(&cache->count, 1);
8522         spin_lock_init(&cache->lock);
8523         INIT_LIST_HEAD(&cache->list);
8524         INIT_LIST_HEAD(&cache->cluster_list);
8525         INIT_LIST_HEAD(&cache->new_bg_list);
8526
8527         btrfs_init_free_space_ctl(cache);
8528
8529         btrfs_set_block_group_used(&cache->item, bytes_used);
8530         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8531         cache->flags = type;
8532         btrfs_set_block_group_flags(&cache->item, type);
8533
8534         cache->last_byte_to_unpin = (u64)-1;
8535         cache->cached = BTRFS_CACHE_FINISHED;
8536         ret = exclude_super_stripes(root, cache);
8537         if (ret) {
8538                 /*
8539                  * We may have excluded something, so call this just in
8540                  * case.
8541                  */
8542                 free_excluded_extents(root, cache);
8543                 kfree(cache->free_space_ctl);
8544                 kfree(cache);
8545                 return ret;
8546         }
8547
8548         add_new_free_space(cache, root->fs_info, chunk_offset,
8549                            chunk_offset + size);
8550
8551         free_excluded_extents(root, cache);
8552
8553         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8554         if (ret) {
8555                 btrfs_remove_free_space_cache(cache);
8556                 btrfs_put_block_group(cache);
8557                 return ret;
8558         }
8559
8560         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8561                                 &cache->space_info);
8562         if (ret) {
8563                 btrfs_remove_free_space_cache(cache);
8564                 spin_lock(&root->fs_info->block_group_cache_lock);
8565                 rb_erase(&cache->cache_node,
8566                          &root->fs_info->block_group_cache_tree);
8567                 spin_unlock(&root->fs_info->block_group_cache_lock);
8568                 btrfs_put_block_group(cache);
8569                 return ret;
8570         }
8571         update_global_block_rsv(root->fs_info);
8572
8573         spin_lock(&cache->space_info->lock);
8574         cache->space_info->bytes_readonly += cache->bytes_super;
8575         spin_unlock(&cache->space_info->lock);
8576
8577         __link_block_group(cache->space_info, cache);
8578
8579         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8580
8581         set_avail_alloc_bits(extent_root->fs_info, type);
8582
8583         return 0;
8584 }
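
One detail of btrfs_make_block_group() worth calling out: a block group's key stores the chunk's logical start in objectid and its length in offset, so lookups elsewhere compare key.objectid + key.offset against logical addresses. A sketch of just that layout (the struct and helper are illustrative, though 192 is the on-disk value of BTRFS_BLOCK_GROUP_ITEM_KEY):

#include <stdint.h>

struct ex_key {
	uint64_t objectid;
	uint8_t  type;
	uint64_t offset;
};

#define EX_BLOCK_GROUP_ITEM_KEY 192

/* key.offset carries the chunk's length, not a position */
static struct ex_key ex_block_group_key(uint64_t chunk_offset, uint64_t size)
{
	struct ex_key key = {
		.objectid = chunk_offset,
		.type = EX_BLOCK_GROUP_ITEM_KEY,
		.offset = size,
	};
	return key;
}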
8585
8586 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8587 {
8588         u64 extra_flags = chunk_to_extended(flags) &
8589                                 BTRFS_EXTENDED_PROFILE_MASK;
8590
8591         write_seqlock(&fs_info->profiles_lock);
8592         if (flags & BTRFS_BLOCK_GROUP_DATA)
8593                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8594         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8595                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8596         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8597                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8598         write_sequnlock(&fs_info->profiles_lock);
8599 }
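
clear_avail_alloc_bits() leans on chunk_to_extended() mapping "no profile bits set" onto an explicit SINGLE bit, so single chunks can be tracked and cleared in the avail_*_alloc_bits words like any other profile. A simplified sketch of that bookkeeping with stand-in masks (the real mask covers every profile bit, not just RAID1):

#include <stdint.h>

#define EX_RAID1        (1ULL << 4)	/* one stand-in profile bit */
#define EX_BIT_SINGLE   (1ULL << 48)	/* stand-in for the SINGLE marker */
#define EX_PROFILE_MASK (EX_RAID1 | EX_BIT_SINGLE)

/* map "no profile" onto the explicit SINGLE bit */
static uint64_t ex_chunk_to_extended(uint64_t flags)
{
	if (!(flags & EX_RAID1))
		flags |= EX_BIT_SINGLE;
	return flags;
}

/* drop a removed chunk's profile bits from the availability word */
static void ex_clear_avail_bits(uint64_t *avail, uint64_t flags)
{
	*avail &= ~(ex_chunk_to_extended(flags) & EX_PROFILE_MASK);
}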
8600
8601 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8602                              struct btrfs_root *root, u64 group_start)
8603 {
8604         struct btrfs_path *path;
8605         struct btrfs_block_group_cache *block_group;
8606         struct btrfs_free_cluster *cluster;
8607         struct btrfs_root *tree_root = root->fs_info->tree_root;
8608         struct btrfs_key key;
8609         struct inode *inode;
8610         int ret;
8611         int index;
8612         int factor;
8613
8614         root = root->fs_info->extent_root;
8615
8616         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8617         BUG_ON(!block_group);
8618         BUG_ON(!block_group->ro);
8619
8620         /*
8621          * Free the reserved super bytes from this block group before
8622          * removing it.
8623          */
8624         free_excluded_extents(root, block_group);
8625
8626         memcpy(&key, &block_group->key, sizeof(key));
8627         index = get_block_group_index(block_group);
8628         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8629                                   BTRFS_BLOCK_GROUP_RAID1 |
8630                                   BTRFS_BLOCK_GROUP_RAID10))
8631                 factor = 2;
8632         else
8633                 factor = 1;
8634
8635         /* make sure this block group isn't part of an allocation cluster */
8636         cluster = &root->fs_info->data_alloc_cluster;
8637         spin_lock(&cluster->refill_lock);
8638         btrfs_return_cluster_to_free_space(block_group, cluster);
8639         spin_unlock(&cluster->refill_lock);
8640
8641         /*
8642          * make sure this block group isn't part of a metadata
8643          * allocation cluster
8644          */
8645         cluster = &root->fs_info->meta_alloc_cluster;
8646         spin_lock(&cluster->refill_lock);
8647         btrfs_return_cluster_to_free_space(block_group, cluster);
8648         spin_unlock(&cluster->refill_lock);
8649
8650         path = btrfs_alloc_path();
8651         if (!path) {
8652                 ret = -ENOMEM;
8653                 goto out;
8654         }
8655
8656         inode = lookup_free_space_inode(tree_root, block_group, path);
8657         if (!IS_ERR(inode)) {
8658                 ret = btrfs_orphan_add(trans, inode);
8659                 if (ret) {
8660                         btrfs_add_delayed_iput(inode);
8661                         goto out;
8662                 }
8663                 clear_nlink(inode);
8664                 /* One for the block group's ref */
8665                 spin_lock(&block_group->lock);
8666                 if (block_group->iref) {
8667                         block_group->iref = 0;
8668                         block_group->inode = NULL;
8669                         spin_unlock(&block_group->lock);
8670                         iput(inode);
8671                 } else {
8672                         spin_unlock(&block_group->lock);
8673                 }
8674                 /* One for our lookup ref */
8675                 btrfs_add_delayed_iput(inode);
8676         }
8677
8678         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8679         key.offset = block_group->key.objectid;
8680         key.type = 0;
8681
8682         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8683         if (ret < 0)
8684                 goto out;
8685         if (ret > 0)
8686                 btrfs_release_path(path);
8687         if (ret == 0) {
8688                 ret = btrfs_del_item(trans, tree_root, path);
8689                 if (ret)
8690                         goto out;
8691                 btrfs_release_path(path);
8692         }
8693
8694         spin_lock(&root->fs_info->block_group_cache_lock);
8695         rb_erase(&block_group->cache_node,
8696                  &root->fs_info->block_group_cache_tree);
8697
8698         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8699                 root->fs_info->first_logical_byte = (u64)-1;
8700         spin_unlock(&root->fs_info->block_group_cache_lock);
8701
8702         down_write(&block_group->space_info->groups_sem);
8703         /*
8704          * we must use list_del_init so people can check to see if they
8705          * are still on the list after taking the semaphore
8706          */
8707         list_del_init(&block_group->list);
8708         if (list_empty(&block_group->space_info->block_groups[index]))
8709                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8710         up_write(&block_group->space_info->groups_sem);
8711
8712         if (block_group->cached == BTRFS_CACHE_STARTED)
8713                 wait_block_group_cache_done(block_group);
8714
8715         btrfs_remove_free_space_cache(block_group);
8716
8717         spin_lock(&block_group->space_info->lock);
8718         block_group->space_info->total_bytes -= block_group->key.offset;
8719         block_group->space_info->bytes_readonly -= block_group->key.offset;
8720         block_group->space_info->disk_total -= block_group->key.offset * factor;
8721         spin_unlock(&block_group->space_info->lock);
8722
8723         memcpy(&key, &block_group->key, sizeof(key));
8724
8725         btrfs_clear_space_info_full(root->fs_info);
8726
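	/* once for our lookup ref, once for the ref held by the cache rbtree */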
8727         btrfs_put_block_group(block_group);
8728         btrfs_put_block_group(block_group);
8729
8730         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8731         if (ret > 0)
8732                 ret = -EIO;
8733         if (ret < 0)
8734                 goto out;
8735
8736         ret = btrfs_del_item(trans, root, path);
8737 out:
8738         btrfs_free_path(path);
8739         return ret;
8740 }
8741
8742 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8743 {
8744         struct btrfs_space_info *space_info;
8745         struct btrfs_super_block *disk_super;
8746         u64 features;
8747         u64 flags;
8748         int mixed = 0;
8749         int ret;
8750
8751         disk_super = fs_info->super_copy;
8752         if (!btrfs_super_root(disk_super))
8753                 return 1;
8754
8755         features = btrfs_super_incompat_flags(disk_super);
8756         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8757                 mixed = 1;
8758
8759         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8760         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8761         if (ret)
8762                 goto out;
8763
8764         if (mixed) {
8765                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8766                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8767         } else {
8768                 flags = BTRFS_BLOCK_GROUP_METADATA;
8769                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8770                 if (ret)
8771                         goto out;
8772
8773                 flags = BTRFS_BLOCK_GROUP_DATA;
8774                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8775         }
8776 out:
8777         return ret;
8778 }
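
Which space_infos btrfs_init_space_info() creates depends on the MIXED_GROUPS incompat bit: a mixed filesystem gets two (SYSTEM, and DATA|METADATA combined), an unmixed one gets three. Enumerated as a sketch with stand-in flag values:

#include <stddef.h>
#include <stdint.h>

#define EX_GROUP_DATA     (1ULL << 0)
#define EX_GROUP_SYSTEM   (1ULL << 1)
#define EX_GROUP_METADATA (1ULL << 2)

/* fills out[] with one flags word per space_info; returns the count */
static size_t ex_space_info_flags(int mixed, uint64_t out[3])
{
	out[0] = EX_GROUP_SYSTEM;
	if (mixed) {
		out[1] = EX_GROUP_METADATA | EX_GROUP_DATA;
		return 2;
	}
	out[1] = EX_GROUP_METADATA;
	out[2] = EX_GROUP_DATA;
	return 3;
}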
8779
8780 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8781 {
8782         return unpin_extent_range(root, start, end);
8783 }
8784
8785 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8786                                u64 num_bytes, u64 *actual_bytes)
8787 {
8788         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8789 }
8790
8791 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8792 {
8793         struct btrfs_fs_info *fs_info = root->fs_info;
8794         struct btrfs_block_group_cache *cache = NULL;
8795         u64 group_trimmed;
8796         u64 start;
8797         u64 end;
8798         u64 trimmed = 0;
8799         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8800         int ret = 0;
8801
8802         /*
8803          * try to trim all FS space; our first block group may start from a non-zero offset.
8804          */
8805         if (range->len == total_bytes)
8806                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8807         else
8808                 cache = btrfs_lookup_block_group(fs_info, range->start);
8809
8810         while (cache) {
8811                 if (cache->key.objectid >= (range->start + range->len)) {
8812                         btrfs_put_block_group(cache);
8813                         break;
8814                 }
8815
8816                 start = max(range->start, cache->key.objectid);
8817                 end = min(range->start + range->len,
8818                                 cache->key.objectid + cache->key.offset);
8819
8820                 if (end - start >= range->minlen) {
8821                         if (!block_group_cache_done(cache)) {
8822                                 ret = cache_block_group(cache, 0);
8823                                 if (ret) {
8824                                         btrfs_put_block_group(cache);
8825                                         break;
8826                                 }
8827                                 ret = wait_block_group_cache_done(cache);
8828                                 if (ret) {
8829                                         btrfs_put_block_group(cache);
8830                                         break;
8831                                 }
8832                         }
8833                         ret = btrfs_trim_block_group(cache,
8834                                                      &group_trimmed,
8835                                                      start,
8836                                                      end,
8837                                                      range->minlen);
8838
8839                         trimmed += group_trimmed;
8840                         if (ret) {
8841                                 btrfs_put_block_group(cache);
8842                                 break;
8843                         }
8844                 }
8845
8846                 cache = next_block_group(fs_info->tree_root, cache);
8847         }
8848
8849         range->len = trimmed;
8850         return ret;
8851 }
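
The core of the trim loop above is clamping the user's range to each block group and skipping windows shorter than minlen. That clamping, sketched standalone:

#include <stdint.h>

static inline uint64_t ex_max(uint64_t a, uint64_t b) { return a > b ? a : b; }
static inline uint64_t ex_min(uint64_t a, uint64_t b) { return a < b ? a : b; }

/*
 * Clamp [range_start, range_start + range_len) to one block group.
 * Fills *start and *end with the overlap and returns nonzero when the
 * window is at least minlen bytes long, zero when the group can be
 * skipped.
 */
static int ex_trim_window(uint64_t range_start, uint64_t range_len,
			  uint64_t bg_start, uint64_t bg_len,
			  uint64_t minlen, uint64_t *start, uint64_t *end)
{
	*start = ex_max(range_start, bg_start);
	*end = ex_min(range_start + range_len, bg_start + bg_len);

	return *end > *start && *end - *start >= minlen;
}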