[android-x86/kernel.git] fs/btrfs/extent-tree.c @ commit dbc115a25798c68a688ae0725c6e37090282374c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op,
				int no_quota);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins,
				     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve,
				       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

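/*
 * return non-zero once free space caching for this block group has
 * completed, whether it finished successfully or aborted with an error
 */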
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

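/*
 * mark a byte range as excluded in both freed_extents trees so the
 * free space caching code never treats it as allocatable
 */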
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

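/*
 * exclude the parts of this block group that hold superblock mirrors:
 * account them in bytes_super and mark them excluded so the free space
 * caching code won't hand them out
 */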
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

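/*
 * grab a reference to the caching control for this block group, or NULL
 * if caching is not in progress (or is being done the fast way from the
 * on-disk space cache, which has no caching_ctl)
 */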
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will not be released until
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

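/*
 * background worker that builds the free space cache for a block group
 * by walking the extent tree commit root: every gap between allocated
 * extents becomes free space.  The commit_root_sem and the caching
 * mutex are dropped periodically so commits and waiters can make
 * progress.
 */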
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->commit_root_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

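/*
 * start caching a block group's free space.  If the space cache mount
 * option is enabled we first try to load the on-disk free space cache;
 * when that can't be used, the slow caching_thread worker is queued to
 * scan the extent tree instead (unless load_cache_only was set)
 */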
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

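/*
 * find the space_info whose type flags (data/metadata/system) match the
 * given block group flags
 */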
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * what the reference count and extent flags will be once all of
 * the delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different.
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again.
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used.  Their major
 * shortcoming is the overhead: every time a tree block gets COWed, we have
 * to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composition:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs.  The
 * meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

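/*
 * compute the key offset for an extent data ref: a 64-bit value built
 * from the crc32c of the root objectid, the inode objectid and the
 * file offset
 */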
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

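/*
 * find an existing data ref item for the extent at bytenr.  Shared refs
 * are keyed by parent block; keyed refs are located by hash, scanning
 * forward to resolve hash collisions and retrying if the leaf changed
 * underneath us
 */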
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

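/*
 * insert a new data ref item for the extent at bytenr, or bump the
 * count of an existing one.  Hash collisions on keyed refs are handled
 * by incrementing key.offset until a matching or empty slot is found
 */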
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

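/*
 * drop refs_to_drop references from the data ref item at path; the
 * item is deleted once its count reaches zero, and *last_ref reports
 * that to the caller
 */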
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

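/*
 * return the reference count stored in a data ref, whether it is an
 * inline ref or a standalone ref item
 */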
1321 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1322                                           struct btrfs_path *path,
1323                                           struct btrfs_extent_inline_ref *iref)
1324 {
1325         struct btrfs_key key;
1326         struct extent_buffer *leaf;
1327         struct btrfs_extent_data_ref *ref1;
1328         struct btrfs_shared_data_ref *ref2;
1329         u32 num_refs = 0;
1330
1331         leaf = path->nodes[0];
1332         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1333         if (iref) {
1334                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1335                     BTRFS_EXTENT_DATA_REF_KEY) {
1336                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1337                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1338                 } else {
1339                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1340                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1341                 }
1342         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1343                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1344                                       struct btrfs_extent_data_ref);
1345                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1346         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1347                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1348                                       struct btrfs_shared_data_ref);
1349                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1350 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1351         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1352                 struct btrfs_extent_ref_v0 *ref0;
1353                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1354                                       struct btrfs_extent_ref_v0);
1355                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1356 #endif
1357         } else {
1358                 WARN_ON(1);
1359         }
1360         return num_refs;
1361 }
1362
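/*
 * Look up the backref item for a tree block.  Shared blocks are keyed on
 * the parent block's bytenr, non-shared blocks on the owning root's
 * objectid.  Returns 0 if the item exists and -ENOENT otherwise.
 */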
1363 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1364                                           struct btrfs_root *root,
1365                                           struct btrfs_path *path,
1366                                           u64 bytenr, u64 parent,
1367                                           u64 root_objectid)
1368 {
1369         struct btrfs_key key;
1370         int ret;
1371
1372         key.objectid = bytenr;
1373         if (parent) {
1374                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1375                 key.offset = parent;
1376         } else {
1377                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1378                 key.offset = root_objectid;
1379         }
1380
1381         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1382         if (ret > 0)
1383                 ret = -ENOENT;
1384 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1385         if (ret == -ENOENT && parent) {
1386                 btrfs_release_path(path);
1387                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1388                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1389                 if (ret > 0)
1390                         ret = -ENOENT;
1391         }
1392 #endif
1393         return ret;
1394 }
1395
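/*
 * Insert a backref item for a tree block, keyed the same way as in
 * lookup_tree_block_ref().  Tree block refs carry no payload, so the
 * item body is empty.
 */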
1396 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1397                                           struct btrfs_root *root,
1398                                           struct btrfs_path *path,
1399                                           u64 bytenr, u64 parent,
1400                                           u64 root_objectid)
1401 {
1402         struct btrfs_key key;
1403         int ret;
1404
1405         key.objectid = bytenr;
1406         if (parent) {
1407                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1408                 key.offset = parent;
1409         } else {
1410                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1411                 key.offset = root_objectid;
1412         }
1413
1414         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1415         btrfs_release_path(path);
1416         return ret;
1417 }
1418
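/*
 * Pick the backref type for an extent: owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree blocks, everything else is data,
 * and a non-zero parent selects the shared variant of each.
 */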
1419 static inline int extent_ref_type(u64 parent, u64 owner)
1420 {
1421         int type;
1422         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1423                 if (parent > 0)
1424                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1425                 else
1426                         type = BTRFS_TREE_BLOCK_REF_KEY;
1427         } else {
1428                 if (parent > 0)
1429                         type = BTRFS_SHARED_DATA_REF_KEY;
1430                 else
1431                         type = BTRFS_EXTENT_DATA_REF_KEY;
1432         }
1433         return type;
1434 }
1435
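/*
 * Starting at @level, walk up the path and return the key that follows
 * the current slot.  Returns 0 and fills in @key, or 1 if the path is
 * already at the last key of the tree.
 */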
1436 static int find_next_key(struct btrfs_path *path, int level,
1437                          struct btrfs_key *key)
1439 {
1440         for (; level < BTRFS_MAX_LEVEL; level++) {
1441                 if (!path->nodes[level])
1442                         break;
1443                 if (path->slots[level] + 1 >=
1444                     btrfs_header_nritems(path->nodes[level]))
1445                         continue;
1446                 if (level == 0)
1447                         btrfs_item_key_to_cpu(path->nodes[level], key,
1448                                               path->slots[level] + 1);
1449                 else
1450                         btrfs_node_key_to_cpu(path->nodes[level], key,
1451                                               path->slots[level] + 1);
1452                 return 0;
1453         }
1454         return 1;
1455 }
1456
1457 /*
1458  * look for inline back ref. if back ref is found, *ref_ret is set
1459  * to the address of inline back ref, and 0 is returned.
1460  *
1461  * if back ref isn't found, *ref_ret is set to the address where it
1462  * should be inserted, and -ENOENT is returned.
1463  *
1464  * if insert is true and there are too many inline back refs, the path
1465  * points to the extent item, and -EAGAIN is returned.
1466  *
1467  * NOTE: inline back refs are ordered in the same way that back ref
1468  *       items in the tree are ordered.
1469  */
1470 static noinline_for_stack
1471 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1472                                  struct btrfs_root *root,
1473                                  struct btrfs_path *path,
1474                                  struct btrfs_extent_inline_ref **ref_ret,
1475                                  u64 bytenr, u64 num_bytes,
1476                                  u64 parent, u64 root_objectid,
1477                                  u64 owner, u64 offset, int insert)
1478 {
1479         struct btrfs_key key;
1480         struct extent_buffer *leaf;
1481         struct btrfs_extent_item *ei;
1482         struct btrfs_extent_inline_ref *iref;
1483         u64 flags;
1484         u64 item_size;
1485         unsigned long ptr;
1486         unsigned long end;
1487         int extra_size;
1488         int type;
1489         int want;
1490         int ret;
1491         int err = 0;
1492         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1493                                                  SKINNY_METADATA);
1494
1495         key.objectid = bytenr;
1496         key.type = BTRFS_EXTENT_ITEM_KEY;
1497         key.offset = num_bytes;
1498
1499         want = extent_ref_type(parent, owner);
1500         if (insert) {
1501                 extra_size = btrfs_extent_inline_ref_size(want);
1502                 path->keep_locks = 1;
1503         } else
1504                 extra_size = -1;
1505
1506         /*
1507          * Owner is our parent level, so we can just add one to get the level
1508          * for the block we are interested in.
1509          */
1510         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1511                 key.type = BTRFS_METADATA_ITEM_KEY;
1512                 key.offset = owner;
1513         }
1514
1515 again:
1516         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1517         if (ret < 0) {
1518                 err = ret;
1519                 goto out;
1520         }
1521
1522         /*
1523          * We may be a newly converted file system which still has the old fat
1524          * extent entries for metadata, so try and see if we have one of those.
1525          */
1526         if (ret > 0 && skinny_metadata) {
1527                 skinny_metadata = false;
1528                 if (path->slots[0]) {
1529                         path->slots[0]--;
1530                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1531                                               path->slots[0]);
1532                         if (key.objectid == bytenr &&
1533                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1534                             key.offset == num_bytes)
1535                                 ret = 0;
1536                 }
1537                 if (ret) {
1538                         key.objectid = bytenr;
1539                         key.type = BTRFS_EXTENT_ITEM_KEY;
1540                         key.offset = num_bytes;
1541                         btrfs_release_path(path);
1542                         goto again;
1543                 }
1544         }
1545
1546         if (ret && !insert) {
1547                 err = -ENOENT;
1548                 goto out;
1549         } else if (WARN_ON(ret)) {
1550                 err = -EIO;
1551                 goto out;
1552         }
1553
1554         leaf = path->nodes[0];
1555         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1556 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1557         if (item_size < sizeof(*ei)) {
1558                 if (!insert) {
1559                         err = -ENOENT;
1560                         goto out;
1561                 }
1562                 ret = convert_extent_item_v0(trans, root, path, owner,
1563                                              extra_size);
1564                 if (ret < 0) {
1565                         err = ret;
1566                         goto out;
1567                 }
1568                 leaf = path->nodes[0];
1569                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1570         }
1571 #endif
1572         BUG_ON(item_size < sizeof(*ei));
1573
1574         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1575         flags = btrfs_extent_flags(leaf, ei);
1576
1577         ptr = (unsigned long)(ei + 1);
1578         end = (unsigned long)ei + item_size;
1579
1580         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1581                 ptr += sizeof(struct btrfs_tree_block_info);
1582                 BUG_ON(ptr > end);
1583         }
1584
1585         err = -ENOENT;
1586         while (1) {
1587                 if (ptr >= end) {
1588                         WARN_ON(ptr > end);
1589                         break;
1590                 }
1591                 iref = (struct btrfs_extent_inline_ref *)ptr;
1592                 type = btrfs_extent_inline_ref_type(leaf, iref);
1593                 if (want < type)
1594                         break;
1595                 if (want > type) {
1596                         ptr += btrfs_extent_inline_ref_size(type);
1597                         continue;
1598                 }
1599
1600                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1601                         struct btrfs_extent_data_ref *dref;
1602                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1603                         if (match_extent_data_ref(leaf, dref, root_objectid,
1604                                                   owner, offset)) {
1605                                 err = 0;
1606                                 break;
1607                         }
1608                         if (hash_extent_data_ref_item(leaf, dref) <
1609                             hash_extent_data_ref(root_objectid, owner, offset))
1610                                 break;
1611                 } else {
1612                         u64 ref_offset;
1613                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1614                         if (parent > 0) {
1615                                 if (parent == ref_offset) {
1616                                         err = 0;
1617                                         break;
1618                                 }
1619                                 if (ref_offset < parent)
1620                                         break;
1621                         } else {
1622                                 if (root_objectid == ref_offset) {
1623                                         err = 0;
1624                                         break;
1625                                 }
1626                                 if (ref_offset < root_objectid)
1627                                         break;
1628                         }
1629                 }
1630                 ptr += btrfs_extent_inline_ref_size(type);
1631         }
1632         if (err == -ENOENT && insert) {
1633                 if (item_size + extra_size >=
1634                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1635                         err = -EAGAIN;
1636                         goto out;
1637                 }
1638                 /*
1639                  * To add new inline back ref, we have to make sure
1640                  * there is no corresponding back ref item.
1641                  * For simplicity, we just do not add new inline back
1642                  * ref if there is any kind of item for this block
1643                  */
1644                 if (find_next_key(path, 0, &key) == 0 &&
1645                     key.objectid == bytenr &&
1646                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1647                         err = -EAGAIN;
1648                         goto out;
1649                 }
1650         }
1651         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1652 out:
1653         if (insert) {
1654                 path->keep_locks = 0;
1655                 btrfs_unlock_up_safe(path, 1);
1656         }
1657         return err;
1658 }
1659
1660 /*
1661  * helper to add new inline back ref
1662  */
1663 static noinline_for_stack
1664 void setup_inline_extent_backref(struct btrfs_root *root,
1665                                  struct btrfs_path *path,
1666                                  struct btrfs_extent_inline_ref *iref,
1667                                  u64 parent, u64 root_objectid,
1668                                  u64 owner, u64 offset, int refs_to_add,
1669                                  struct btrfs_delayed_extent_op *extent_op)
1670 {
1671         struct extent_buffer *leaf;
1672         struct btrfs_extent_item *ei;
1673         unsigned long ptr;
1674         unsigned long end;
1675         unsigned long item_offset;
1676         u64 refs;
1677         int size;
1678         int type;
1679
1680         leaf = path->nodes[0];
1681         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1682         item_offset = (unsigned long)iref - (unsigned long)ei;
1683
1684         type = extent_ref_type(parent, owner);
1685         size = btrfs_extent_inline_ref_size(type);
1686
1687         btrfs_extend_item(root, path, size);
1688
1689         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1690         refs = btrfs_extent_refs(leaf, ei);
1691         refs += refs_to_add;
1692         btrfs_set_extent_refs(leaf, ei, refs);
1693         if (extent_op)
1694                 __run_delayed_extent_op(extent_op, leaf, ei);
1695
1696         ptr = (unsigned long)ei + item_offset;
1697         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1698         if (ptr < end - size)
1699                 memmove_extent_buffer(leaf, ptr + size, ptr,
1700                                       end - size - ptr);
1701
1702         iref = (struct btrfs_extent_inline_ref *)ptr;
1703         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1704         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1705                 struct btrfs_extent_data_ref *dref;
1706                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1707                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1708                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1709                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1710                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1711         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1712                 struct btrfs_shared_data_ref *sref;
1713                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1714                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1715                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1716         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1717                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1718         } else {
1719                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1720         }
1721         btrfs_mark_buffer_dirty(leaf);
1722 }
1723
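/*
 * Look up a backref for the given extent, trying the inline form first
 * and falling back to a keyed backref item.  On an inline hit *ref_ret
 * points at the inline ref, otherwise it is set to NULL.
 */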
1724 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1725                                  struct btrfs_root *root,
1726                                  struct btrfs_path *path,
1727                                  struct btrfs_extent_inline_ref **ref_ret,
1728                                  u64 bytenr, u64 num_bytes, u64 parent,
1729                                  u64 root_objectid, u64 owner, u64 offset)
1730 {
1731         int ret;
1732
1733         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1734                                            bytenr, num_bytes, parent,
1735                                            root_objectid, owner, offset, 0);
1736         if (ret != -ENOENT)
1737                 return ret;
1738
1739         btrfs_release_path(path);
1740         *ref_ret = NULL;
1741
1742         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1743                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1744                                             root_objectid);
1745         } else {
1746                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1747                                              root_objectid, owner, offset);
1748         }
1749         return ret;
1750 }
1751
1752 /*
1753  * helper to update/remove inline back ref
1754  */
1755 static noinline_for_stack
1756 void update_inline_extent_backref(struct btrfs_root *root,
1757                                   struct btrfs_path *path,
1758                                   struct btrfs_extent_inline_ref *iref,
1759                                   int refs_to_mod,
1760                                   struct btrfs_delayed_extent_op *extent_op,
1761                                   int *last_ref)
1762 {
1763         struct extent_buffer *leaf;
1764         struct btrfs_extent_item *ei;
1765         struct btrfs_extent_data_ref *dref = NULL;
1766         struct btrfs_shared_data_ref *sref = NULL;
1767         unsigned long ptr;
1768         unsigned long end;
1769         u32 item_size;
1770         int size;
1771         int type;
1772         u64 refs;
1773
1774         leaf = path->nodes[0];
1775         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1776         refs = btrfs_extent_refs(leaf, ei);
1777         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1778         refs += refs_to_mod;
1779         btrfs_set_extent_refs(leaf, ei, refs);
1780         if (extent_op)
1781                 __run_delayed_extent_op(extent_op, leaf, ei);
1782
1783         type = btrfs_extent_inline_ref_type(leaf, iref);
1784
1785         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1786                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1787                 refs = btrfs_extent_data_ref_count(leaf, dref);
1788         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1789                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1790                 refs = btrfs_shared_data_ref_count(leaf, sref);
1791         } else {
1792                 refs = 1;
1793                 BUG_ON(refs_to_mod != -1);
1794         }
1795
1796         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1797         refs += refs_to_mod;
1798
1799         if (refs > 0) {
1800                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1801                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1802                 else
1803                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1804         } else {
1805                 *last_ref = 1;
1806                 size =  btrfs_extent_inline_ref_size(type);
1807                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1808                 ptr = (unsigned long)iref;
1809                 end = (unsigned long)ei + item_size;
1810                 if (ptr + size < end)
1811                         memmove_extent_buffer(leaf, ptr, ptr + size,
1812                                               end - ptr - size);
1813                 item_size -= size;
1814                 btrfs_truncate_item(root, path, item_size, 1);
1815         }
1816         btrfs_mark_buffer_dirty(leaf);
1817 }
1818
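/*
 * Add an inline backref, either by bumping an existing matching ref
 * (data extents only) or by inserting a new one at the position the
 * lookup reported.  Returns -EAGAIN when the ref will not fit inline.
 */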
1819 static noinline_for_stack
1820 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1821                                  struct btrfs_root *root,
1822                                  struct btrfs_path *path,
1823                                  u64 bytenr, u64 num_bytes, u64 parent,
1824                                  u64 root_objectid, u64 owner,
1825                                  u64 offset, int refs_to_add,
1826                                  struct btrfs_delayed_extent_op *extent_op)
1827 {
1828         struct btrfs_extent_inline_ref *iref;
1829         int ret;
1830
1831         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1832                                            bytenr, num_bytes, parent,
1833                                            root_objectid, owner, offset, 1);
1834         if (ret == 0) {
1835                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1836                 update_inline_extent_backref(root, path, iref,
1837                                              refs_to_add, extent_op, NULL);
1838         } else if (ret == -ENOENT) {
1839                 setup_inline_extent_backref(root, path, iref, parent,
1840                                             root_objectid, owner, offset,
1841                                             refs_to_add, extent_op);
1842                 ret = 0;
1843         }
1844         return ret;
1845 }
1846
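/*
 * Insert a keyed (non-inline) backref item, dispatching on whether the
 * extent is a tree block or a data extent.
 */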
1847 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1848                                  struct btrfs_root *root,
1849                                  struct btrfs_path *path,
1850                                  u64 bytenr, u64 parent, u64 root_objectid,
1851                                  u64 owner, u64 offset, int refs_to_add)
1852 {
1853         int ret;
1854         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1855                 BUG_ON(refs_to_add != 1);
1856                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1857                                             parent, root_objectid);
1858         } else {
1859                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1860                                              parent, root_objectid,
1861                                              owner, offset, refs_to_add);
1862         }
1863         return ret;
1864 }
1865
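/*
 * Drop @refs_to_drop references from a backref, either by updating the
 * inline ref, updating the data ref item, or deleting the single-count
 * tree block ref item outright.  *last_ref is set when the last
 * reference on the backref goes away.
 */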
1866 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1867                                  struct btrfs_root *root,
1868                                  struct btrfs_path *path,
1869                                  struct btrfs_extent_inline_ref *iref,
1870                                  int refs_to_drop, int is_data, int *last_ref)
1871 {
1872         int ret = 0;
1873
1874         BUG_ON(!is_data && refs_to_drop != 1);
1875         if (iref) {
1876                 update_inline_extent_backref(root, path, iref,
1877                                              -refs_to_drop, NULL, last_ref);
1878         } else if (is_data) {
1879                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1880                                              last_ref);
1881         } else {
1882                 *last_ref = 1;
1883                 ret = btrfs_del_item(trans, root, path);
1884         }
1885         return ret;
1886 }
1887
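/* Issue a discard for a byte range, converted to 512-byte sectors. */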
1888 static int btrfs_issue_discard(struct block_device *bdev,
1889                                 u64 start, u64 len)
1890 {
1891         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1892 }
1893
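/*
 * Discard an extent on every stripe backing it, skipping devices that
 * cannot discard.  On return *actual_bytes, if provided, reports how
 * many bytes were actually discarded.
 */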
1894 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1895                                 u64 num_bytes, u64 *actual_bytes)
1896 {
1897         int ret;
1898         u64 discarded_bytes = 0;
1899         struct btrfs_bio *bbio = NULL;
1900
1902         /* Tell the block device(s) that the sectors can be discarded */
1903         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1904                               bytenr, &num_bytes, &bbio, 0);
1905         /* Error condition is -ENOMEM */
1906         if (!ret) {
1907                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1908                 int i;
1909
1911                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1912                         if (!stripe->dev->can_discard)
1913                                 continue;
1914
1915                         ret = btrfs_issue_discard(stripe->dev->bdev,
1916                                                   stripe->physical,
1917                                                   stripe->length);
1918                         if (!ret)
1919                                 discarded_bytes += stripe->length;
1920                         else if (ret != -EOPNOTSUPP)
1921                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1922
1923                         /*
1924                          * Just in case we get back EOPNOTSUPP for some reason,
1925                          * just ignore the return value so we don't screw up
1926                          * people calling discard_extent.
1927                          */
1928                         ret = 0;
1929                 }
1930                 kfree(bbio);
1931         }
1932
1933         if (actual_bytes)
1934                 *actual_bytes = discarded_bytes;
1935
1937         if (ret == -EOPNOTSUPP)
1938                 ret = 0;
1939         return ret;
1940 }
1941
1942 /* Can return -ENOMEM */
1943 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1944                          struct btrfs_root *root,
1945                          u64 bytenr, u64 num_bytes, u64 parent,
1946                          u64 root_objectid, u64 owner, u64 offset,
1947                          int no_quota)
1948 {
1949         int ret;
1950         struct btrfs_fs_info *fs_info = root->fs_info;
1951
1952         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1953                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1954
1955         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1956                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1957                                         num_bytes,
1958                                         parent, root_objectid, (int)owner,
1959                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1960         } else {
1961                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1962                                         num_bytes,
1963                                         parent, root_objectid, owner, offset,
1964                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1965         }
1966         return ret;
1967 }
1968
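/*
 * Add @refs_to_add references to an existing extent.  The fast path
 * updates or inserts an inline backref; on -EAGAIN (no inline room
 * left) the extent item's count is bumped and a keyed backref item is
 * inserted instead.  Qgroup changes are recorded unless @no_quota.
 */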
1969 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1970                                   struct btrfs_root *root,
1971                                   u64 bytenr, u64 num_bytes,
1972                                   u64 parent, u64 root_objectid,
1973                                   u64 owner, u64 offset, int refs_to_add,
1974                                   int no_quota,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_fs_info *fs_info = root->fs_info;
1978         struct btrfs_path *path;
1979         struct extent_buffer *leaf;
1980         struct btrfs_extent_item *item;
1981         struct btrfs_key key;
1982         u64 refs;
1983         int ret;
1984         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1985
1986         path = btrfs_alloc_path();
1987         if (!path)
1988                 return -ENOMEM;
1989
1990         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1991                 no_quota = 1;
1992
1993         path->reada = 1;
1994         path->leave_spinning = 1;
1995         /* this will set up the path even if it fails to insert the back ref */
1996         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1997                                            bytenr, num_bytes, parent,
1998                                            root_objectid, owner, offset,
1999                                            refs_to_add, extent_op);
2000         if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2001                 goto out;
2002         /*
2003          * Ok we were able to insert an inline extent ref and it appears to be
2004          * a new reference, so deal with the qgroup accounting.
2005          */
2006         if (!ret && !no_quota) {
2007                 ASSERT(root->fs_info->quota_enabled);
2008                 leaf = path->nodes[0];
2009                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2010                 item = btrfs_item_ptr(leaf, path->slots[0],
2011                                       struct btrfs_extent_item);
2012                 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2013                         type = BTRFS_QGROUP_OPER_ADD_SHARED;
2014                 btrfs_release_path(path);
2015
2016                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2017                                               bytenr, num_bytes, type, 0);
2018                 goto out;
2019         }
2020
2021         /*
2022          * Ok we had -EAGAIN which means we didn't have space to insert an
2023          * inline extent ref, so just update the reference count and add a
2024          * normal backref.
2025          */
2026         leaf = path->nodes[0];
2027         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2028         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2029         refs = btrfs_extent_refs(leaf, item);
2030         if (refs)
2031                 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2032         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2033         if (extent_op)
2034                 __run_delayed_extent_op(extent_op, leaf, item);
2035
2036         btrfs_mark_buffer_dirty(leaf);
2037         btrfs_release_path(path);
2038
2039         if (!no_quota) {
2040                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2041                                               bytenr, num_bytes, type, 0);
2042                 if (ret)
2043                         goto out;
2044         }
2045
2046         path->reada = 1;
2047         path->leave_spinning = 1;
2048         /* now insert the actual backref */
2049         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2050                                     path, bytenr, parent, root_objectid,
2051                                     owner, offset, refs_to_add);
2052         if (ret)
2053                 btrfs_abort_transaction(trans, root, ret);
2054 out:
2055         btrfs_free_path(path);
2056         return ret;
2057 }
2058
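/*
 * Process a single delayed ref for a data extent: allocate the reserved
 * extent, add a reference, or drop one, depending on the node's action.
 */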
2059 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2060                                 struct btrfs_root *root,
2061                                 struct btrfs_delayed_ref_node *node,
2062                                 struct btrfs_delayed_extent_op *extent_op,
2063                                 int insert_reserved)
2064 {
2065         int ret = 0;
2066         struct btrfs_delayed_data_ref *ref;
2067         struct btrfs_key ins;
2068         u64 parent = 0;
2069         u64 ref_root = 0;
2070         u64 flags = 0;
2071
2072         ins.objectid = node->bytenr;
2073         ins.offset = node->num_bytes;
2074         ins.type = BTRFS_EXTENT_ITEM_KEY;
2075
2076         ref = btrfs_delayed_node_to_data_ref(node);
2077         trace_run_delayed_data_ref(node, ref, node->action);
2078
2079         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2080                 parent = ref->parent;
2081         ref_root = ref->root;
2082
2083         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2084                 if (extent_op)
2085                         flags |= extent_op->flags_to_set;
2086                 ret = alloc_reserved_file_extent(trans, root,
2087                                                  parent, ref_root, flags,
2088                                                  ref->objectid, ref->offset,
2089                                                  &ins, node->ref_mod);
2090         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2091                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2092                                              node->num_bytes, parent,
2093                                              ref_root, ref->objectid,
2094                                              ref->offset, node->ref_mod,
2095                                              node->no_quota, extent_op);
2096         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2097                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2098                                           node->num_bytes, parent,
2099                                           ref_root, ref->objectid,
2100                                           ref->offset, node->ref_mod,
2101                                           extent_op, node->no_quota);
2102         } else {
2103                 BUG();
2104         }
2105         return ret;
2106 }
2107
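/*
 * Apply a delayed extent op directly to an extent item in a leaf,
 * updating the flags and/or the key stored in the tree block info.
 */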
2108 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2109                                     struct extent_buffer *leaf,
2110                                     struct btrfs_extent_item *ei)
2111 {
2112         u64 flags = btrfs_extent_flags(leaf, ei);
2113         if (extent_op->update_flags) {
2114                 flags |= extent_op->flags_to_set;
2115                 btrfs_set_extent_flags(leaf, ei, flags);
2116         }
2117
2118         if (extent_op->update_key) {
2119                 struct btrfs_tree_block_info *bi;
2120                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2121                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2122                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2123         }
2124 }
2125
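/*
 * Locate the extent item a delayed extent op refers to, falling back
 * from skinny metadata to old-style items if needed, and apply the op.
 */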
2126 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2127                                  struct btrfs_root *root,
2128                                  struct btrfs_delayed_ref_node *node,
2129                                  struct btrfs_delayed_extent_op *extent_op)
2130 {
2131         struct btrfs_key key;
2132         struct btrfs_path *path;
2133         struct btrfs_extent_item *ei;
2134         struct extent_buffer *leaf;
2135         u32 item_size;
2136         int ret;
2137         int err = 0;
2138         int metadata = !extent_op->is_data;
2139
2140         if (trans->aborted)
2141                 return 0;
2142
2143         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2144                 metadata = 0;
2145
2146         path = btrfs_alloc_path();
2147         if (!path)
2148                 return -ENOMEM;
2149
2150         key.objectid = node->bytenr;
2151
2152         if (metadata) {
2153                 key.type = BTRFS_METADATA_ITEM_KEY;
2154                 key.offset = extent_op->level;
2155         } else {
2156                 key.type = BTRFS_EXTENT_ITEM_KEY;
2157                 key.offset = node->num_bytes;
2158         }
2159
2160 again:
2161         path->reada = 1;
2162         path->leave_spinning = 1;
2163         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2164                                 path, 0, 1);
2165         if (ret < 0) {
2166                 err = ret;
2167                 goto out;
2168         }
2169         if (ret > 0) {
2170                 if (metadata) {
2171                         if (path->slots[0] > 0) {
2172                                 path->slots[0]--;
2173                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2174                                                       path->slots[0]);
2175                                 if (key.objectid == node->bytenr &&
2176                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2177                                     key.offset == node->num_bytes)
2178                                         ret = 0;
2179                         }
2180                         if (ret > 0) {
2181                                 btrfs_release_path(path);
2182                                 metadata = 0;
2183
2184                                 key.objectid = node->bytenr;
2185                                 key.offset = node->num_bytes;
2186                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2187                                 goto again;
2188                         }
2189                 } else {
2190                         err = -EIO;
2191                         goto out;
2192                 }
2193         }
2194
2195         leaf = path->nodes[0];
2196         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2197 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2198         if (item_size < sizeof(*ei)) {
2199                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2200                                              path, (u64)-1, 0);
2201                 if (ret < 0) {
2202                         err = ret;
2203                         goto out;
2204                 }
2205                 leaf = path->nodes[0];
2206                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2207         }
2208 #endif
2209         BUG_ON(item_size < sizeof(*ei));
2210         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2211         __run_delayed_extent_op(extent_op, leaf, ei);
2212
2213         btrfs_mark_buffer_dirty(leaf);
2214 out:
2215         btrfs_free_path(path);
2216         return err;
2217 }
2218
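/*
 * Process a single delayed ref for a tree block, the metadata
 * counterpart of run_delayed_data_ref().
 */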
2219 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2220                                 struct btrfs_root *root,
2221                                 struct btrfs_delayed_ref_node *node,
2222                                 struct btrfs_delayed_extent_op *extent_op,
2223                                 int insert_reserved)
2224 {
2225         int ret = 0;
2226         struct btrfs_delayed_tree_ref *ref;
2227         struct btrfs_key ins;
2228         u64 parent = 0;
2229         u64 ref_root = 0;
2230         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2231                                                  SKINNY_METADATA);
2232
2233         ref = btrfs_delayed_node_to_tree_ref(node);
2234         trace_run_delayed_tree_ref(node, ref, node->action);
2235
2236         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2237                 parent = ref->parent;
2238         ref_root = ref->root;
2239
2240         ins.objectid = node->bytenr;
2241         if (skinny_metadata) {
2242                 ins.offset = ref->level;
2243                 ins.type = BTRFS_METADATA_ITEM_KEY;
2244         } else {
2245                 ins.offset = node->num_bytes;
2246                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2247         }
2248
2249         BUG_ON(node->ref_mod != 1);
2250         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2251                 BUG_ON(!extent_op || !extent_op->update_flags);
2252                 ret = alloc_reserved_tree_block(trans, root,
2253                                                 parent, ref_root,
2254                                                 extent_op->flags_to_set,
2255                                                 &extent_op->key,
2256                                                 ref->level, &ins,
2257                                                 node->no_quota);
2258         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2259                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2260                                              node->num_bytes, parent, ref_root,
2261                                              ref->level, 0, 1, node->no_quota,
2262                                              extent_op);
2263         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2264                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2265                                           node->num_bytes, parent, ref_root,
2266                                           ref->level, 0, 1, extent_op,
2267                                           node->no_quota);
2268         } else {
2269                 BUG();
2270         }
2271         return ret;
2272 }
2273
2274 /* helper function to actually process a single delayed ref entry */
2275 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2276                                struct btrfs_root *root,
2277                                struct btrfs_delayed_ref_node *node,
2278                                struct btrfs_delayed_extent_op *extent_op,
2279                                int insert_reserved)
2280 {
2281         int ret = 0;
2282
2283         if (trans->aborted) {
2284                 if (insert_reserved)
2285                         btrfs_pin_extent(root, node->bytenr,
2286                                          node->num_bytes, 1);
2287                 return 0;
2288         }
2289
2290         if (btrfs_delayed_ref_is_head(node)) {
2291                 struct btrfs_delayed_ref_head *head;
2292                 /*
2293                  * we've hit the end of the chain and we were supposed
2294                  * to insert this extent into the tree.  But, it got
2295                  * deleted before we ever needed to insert it, so all
2296                  * we have to do is clean up the accounting
2297                  */
2298                 BUG_ON(extent_op);
2299                 head = btrfs_delayed_node_to_head(node);
2300                 trace_run_delayed_ref_head(node, head, node->action);
2301
2302                 if (insert_reserved) {
2303                         btrfs_pin_extent(root, node->bytenr,
2304                                          node->num_bytes, 1);
2305                         if (head->is_data) {
2306                                 ret = btrfs_del_csums(trans, root,
2307                                                       node->bytenr,
2308                                                       node->num_bytes);
2309                         }
2310                 }
2311                 return ret;
2312         }
2313
2314         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2315             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2316                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2317                                            insert_reserved);
2318         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2319                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2320                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2321                                            insert_reserved);
2322         else
2323                 BUG();
2324         return ret;
2325 }
2326
2327 static noinline struct btrfs_delayed_ref_node *
2328 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2329 {
2330         struct rb_node *node;
2331         struct btrfs_delayed_ref_node *ref, *last = NULL;
2332
2333         /*
2334          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2335          * This prevents the ref count from going down to zero while
2336          * there are still pending delayed refs.
2337          */
2338         node = rb_first(&head->ref_root);
2339         while (node) {
2340                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2341                                 rb_node);
2342                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2343                         return ref;
2344                 else if (last == NULL)
2345                         last = ref;
2346                 node = rb_next(node);
2347         }
2348         return last;
2349 }
2350
2351 /*
2352  * Returns 0 on success or if called with an already aborted transaction.
2353  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2354  */
2355 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2356                                              struct btrfs_root *root,
2357                                              unsigned long nr)
2358 {
2359         struct btrfs_delayed_ref_root *delayed_refs;
2360         struct btrfs_delayed_ref_node *ref;
2361         struct btrfs_delayed_ref_head *locked_ref = NULL;
2362         struct btrfs_delayed_extent_op *extent_op;
2363         struct btrfs_fs_info *fs_info = root->fs_info;
2364         ktime_t start = ktime_get();
2365         int ret;
2366         unsigned long count = 0;
2367         unsigned long actual_count = 0;
2368         int must_insert_reserved = 0;
2369
2370         delayed_refs = &trans->transaction->delayed_refs;
2371         while (1) {
2372                 if (!locked_ref) {
2373                         if (count >= nr)
2374                                 break;
2375
2376                         spin_lock(&delayed_refs->lock);
2377                         locked_ref = btrfs_select_ref_head(trans);
2378                         if (!locked_ref) {
2379                                 spin_unlock(&delayed_refs->lock);
2380                                 break;
2381                         }
2382
2383                         /* grab the lock that says we are going to process
2384                          * all the refs for this head */
2385                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2386                         spin_unlock(&delayed_refs->lock);
2387                         /*
2388                          * we may have dropped the spin lock to get the head
2389                          * mutex lock, and that might have given someone else
2390                          * time to free the head.  If that's true, it has been
2391                          * removed from our list and we can move on.
2392                          */
2393                         if (ret == -EAGAIN) {
2394                                 locked_ref = NULL;
2395                                 count++;
2396                                 continue;
2397                         }
2398                 }
2399
2400                 /*
2401                  * We need to try and merge add/drops of the same ref since we
2402                  * can run into issues with relocate dropping the implicit ref
2403                  * and then it being added back again before the drop can
2404                  * finish.  If we merged anything we need to re-loop so we can
2405                  * get a good ref.
2406                  */
2407                 spin_lock(&locked_ref->lock);
2408                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2409                                          locked_ref);
2410
2411                 /*
2412                  * locked_ref is the head node, so we have to go one
2413                  * node back for any delayed ref updates
2414                  */
2415                 ref = select_delayed_ref(locked_ref);
2416
2417                 if (ref && ref->seq &&
2418                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2419                         spin_unlock(&locked_ref->lock);
2420                         btrfs_delayed_ref_unlock(locked_ref);
2421                         spin_lock(&delayed_refs->lock);
2422                         locked_ref->processing = 0;
2423                         delayed_refs->num_heads_ready++;
2424                         spin_unlock(&delayed_refs->lock);
2425                         locked_ref = NULL;
2426                         cond_resched();
2427                         count++;
2428                         continue;
2429                 }
2430
2431                 /*
2432                  * record the must insert reserved flag before we
2433                  * drop the spin lock.
2434                  */
2435                 must_insert_reserved = locked_ref->must_insert_reserved;
2436                 locked_ref->must_insert_reserved = 0;
2437
2438                 extent_op = locked_ref->extent_op;
2439                 locked_ref->extent_op = NULL;
2440
2441                 if (!ref) {
2443                         /*
2444                          * All delayed refs have been processed, go ahead
2445                          * and send the head node to run_one_delayed_ref,
2446                          * so that any accounting fixes can happen
2447                          */
2448                         ref = &locked_ref->node;
2449
2450                         if (extent_op && must_insert_reserved) {
2451                                 btrfs_free_delayed_extent_op(extent_op);
2452                                 extent_op = NULL;
2453                         }
2454
2455                         if (extent_op) {
2456                                 spin_unlock(&locked_ref->lock);
2457                                 ret = run_delayed_extent_op(trans, root,
2458                                                             ref, extent_op);
2459                                 btrfs_free_delayed_extent_op(extent_op);
2460
2461                                 if (ret) {
2462                                         /*
2463                                          * Need to reset must_insert_reserved if
2464                                          * there was an error so the abort stuff
2465                                          * can cleanup the reserved space
2466                                          * properly.
2467                                          */
2468                                         if (must_insert_reserved)
2469                                                 locked_ref->must_insert_reserved = 1;
2470                                         locked_ref->processing = 0;
2471                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2472                                         btrfs_delayed_ref_unlock(locked_ref);
2473                                         return ret;
2474                                 }
2475                                 continue;
2476                         }
2477
2478                         /*
2479                          * Need to drop our head ref lock and re-acquire the
2480                          * delayed ref lock and then re-check to make sure
2481                          * nobody got added.
2482                          */
2483                         spin_unlock(&locked_ref->lock);
2484                         spin_lock(&delayed_refs->lock);
2485                         spin_lock(&locked_ref->lock);
2486                         if (rb_first(&locked_ref->ref_root) ||
2487                             locked_ref->extent_op) {
2488                                 spin_unlock(&locked_ref->lock);
2489                                 spin_unlock(&delayed_refs->lock);
2490                                 continue;
2491                         }
2492                         ref->in_tree = 0;
2493                         delayed_refs->num_heads--;
2494                         rb_erase(&locked_ref->href_node,
2495                                  &delayed_refs->href_root);
2496                         spin_unlock(&delayed_refs->lock);
2497                 } else {
2498                         actual_count++;
2499                         ref->in_tree = 0;
2500                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2501                 }
2502                 atomic_dec(&delayed_refs->num_entries);
2503
2504                 if (!btrfs_delayed_ref_is_head(ref)) {
2505                         /*
2506                          * when we play the delayed ref, also correct the
2507                          * ref_mod on head
2508                          */
2509                         switch (ref->action) {
2510                         case BTRFS_ADD_DELAYED_REF:
2511                         case BTRFS_ADD_DELAYED_EXTENT:
2512                                 locked_ref->node.ref_mod -= ref->ref_mod;
2513                                 break;
2514                         case BTRFS_DROP_DELAYED_REF:
2515                                 locked_ref->node.ref_mod += ref->ref_mod;
2516                                 break;
2517                         default:
2518                                 WARN_ON(1);
2519                         }
2520                 }
2521                 spin_unlock(&locked_ref->lock);
2522
2523                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2524                                           must_insert_reserved);
2525
2526                 btrfs_free_delayed_extent_op(extent_op);
2527                 if (ret) {
2528                         locked_ref->processing = 0;
2529                         btrfs_delayed_ref_unlock(locked_ref);
2530                         btrfs_put_delayed_ref(ref);
2531                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2532                         return ret;
2533                 }
2534
2535                 /*
2536                  * If this node is a head, that means all the refs in this head
2537                  * have been dealt with, and we will pick the next head to deal
2538                  * with, so we must unlock the head and drop it from the cluster
2539                  * list before we release it.
2540                  */
2541                 if (btrfs_delayed_ref_is_head(ref)) {
2542                         btrfs_delayed_ref_unlock(locked_ref);
2543                         locked_ref = NULL;
2544                 }
2545                 btrfs_put_delayed_ref(ref);
2546                 count++;
2547                 cond_resched();
2548         }
2549
2550         /*
2551          * We don't want to include ref heads since we can have empty ref heads
2552          * and those will drastically skew our runtime down since we just do
2553          * accounting, no actual extent tree updates.
2554          */
2555         if (actual_count > 0) {
2556                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2557                 u64 avg;
2558
2559                 /*
2560                  * We weigh the current average higher than our current runtime
2561                  * to avoid large swings in the average.
2562                  */
2563                 spin_lock(&delayed_refs->lock);
2564                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2565                 avg = div64_u64(avg, 4);
2566                 fs_info->avg_delayed_ref_runtime = avg;
2567                 spin_unlock(&delayed_refs->lock);
2568         }
2569         return 0;
2570 }
2571
2572 #ifdef SCRAMBLE_DELAYED_REFS
2573 /*
2574  * Normally delayed refs get processed in ascending bytenr order. This
2575  * correlates in most cases to the order added. To expose dependencies on this
2576  * order, we start to process the tree in the middle instead of the beginning
2577  */
2578 static u64 find_middle(struct rb_root *root)
2579 {
2580         struct rb_node *n = root->rb_node;
2581         struct btrfs_delayed_ref_node *entry;
2582         int alt = 1;
2583         u64 middle;
2584         u64 first = 0, last = 0;
2585
2586         n = rb_first(root);
2587         if (n) {
2588                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2589                 first = entry->bytenr;
2590         }
2591         n = rb_last(root);
2592         if (n) {
2593                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2594                 last = entry->bytenr;
2595         }
2596         n = root->rb_node;
2597
2598         while (n) {
2599                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2600                 WARN_ON(!entry->in_tree);
2601
2602                 middle = entry->bytenr;
2603
2604                 if (alt)
2605                         n = n->rb_left;
2606                 else
2607                         n = n->rb_right;
2608
2609                 alt = 1 - alt;
2610         }
2611         return middle;
2612 }
2613 #endif
2614
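/*
 * Rough worst-case number of extent tree leaves needed to record the
 * updates for @heads delayed ref heads.  Callers double the result to
 * account for leaves never being completely full.
 */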
2615 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2616 {
2617         u64 num_bytes;
2618
2619         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2620                              sizeof(struct btrfs_extent_inline_ref));
2621         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2622                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2623
2624         /*
2625          * We don't ever fill up leaves all the way so multiply by 2 just to be
2626          * closer to what we're really going to want to use.
2627          */
2628         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2629 }
2630
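/*
 * Returns 1 if the global reserve may not cover the metadata updates
 * the ready delayed ref heads could generate, 0 if there is room.
 */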
2631 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2632                                        struct btrfs_root *root)
2633 {
2634         struct btrfs_block_rsv *global_rsv;
2635         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2636         u64 num_bytes;
2637         int ret = 0;
2638
2639         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2640         num_heads = heads_to_leaves(root, num_heads);
2641         if (num_heads > 1)
2642                 num_bytes += (num_heads - 1) * root->nodesize;
2643         num_bytes <<= 1;
2644         global_rsv = &root->fs_info->global_block_rsv;
2645
2646         /*
2647          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2648          * wiggle room since running delayed refs can create more delayed refs.
2649          */
2650         if (global_rsv->space_info->full)
2651                 num_bytes <<= 1;
2652
2653         spin_lock(&global_rsv->lock);
2654         if (global_rsv->reserved <= num_bytes)
2655                 ret = 1;
2656         spin_unlock(&global_rsv->lock);
2657         return ret;
2658 }
2659
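/*
 * Use the average time per delayed ref to judge how urgent the backlog is:
 * return 1 if the queued entries amount to a second or more of estimated
 * work, 2 if at least half a second, otherwise fall back to the reserve
 * check above.
 */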
2660 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2661                                        struct btrfs_root *root)
2662 {
2663         struct btrfs_fs_info *fs_info = root->fs_info;
2664         u64 num_entries =
2665                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2666         u64 avg_runtime;
2667         u64 val;
2668
2669         smp_mb();
2670         avg_runtime = fs_info->avg_delayed_ref_runtime;
2671         val = num_entries * avg_runtime;
2672         if (val >= NSEC_PER_SEC)
2673                 return 1;
2674         if (val >= NSEC_PER_SEC / 2)
2675                 return 2;
2676
2677         return btrfs_check_space_for_delayed_refs(trans, root);
2678 }
2679
2680 struct async_delayed_refs {
2681         struct btrfs_root *root;
2682         int count;
2683         int error;
2684         int sync;
2685         struct completion wait;
2686         struct btrfs_work work;
2687 };
2688
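/*
 * Worker body for btrfs_async_run_delayed_refs(): join a transaction, run
 * up to async->count delayed refs, then either wake the waiter or free the
 * async struct for fire-and-forget callers.
 */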
2689 static void delayed_ref_async_start(struct btrfs_work *work)
2690 {
2691         struct async_delayed_refs *async;
2692         struct btrfs_trans_handle *trans;
2693         int ret;
2694
2695         async = container_of(work, struct async_delayed_refs, work);
2696
2697         trans = btrfs_join_transaction(async->root);
2698         if (IS_ERR(trans)) {
2699                 async->error = PTR_ERR(trans);
2700                 goto done;
2701         }
2702
2703         /*
2704          * trans->sync means that when we call end_transaction, we won't
2705          * wait on delayed refs
2706          */
2707         trans->sync = true;
2708         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2709         if (ret)
2710                 async->error = ret;
2711
2712         ret = btrfs_end_transaction(trans, async->root);
2713         if (ret && !async->error)
2714                 async->error = ret;
2715 done:
2716         if (async->sync)
2717                 complete(&async->wait);
2718         else
2719                 kfree(async);
2720 }
2721
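/*
 * Queue an asynchronous run of 'count' delayed refs on the extent workers.
 * With 'wait' set this blocks until the worker finishes and returns its
 * error; otherwise it returns immediately and the worker frees the request
 * itself.
 */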
2722 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2723                                  unsigned long count, int wait)
2724 {
2725         struct async_delayed_refs *async;
2726         int ret;
2727
2728         async = kmalloc(sizeof(*async), GFP_NOFS);
2729         if (!async)
2730                 return -ENOMEM;
2731
2732         async->root = root->fs_info->tree_root;
2733         async->count = count;
2734         async->error = 0;
2735         if (wait)
2736                 async->sync = 1;
2737         else
2738                 async->sync = 0;
2739         init_completion(&async->wait);
2740
2741         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2742                         delayed_ref_async_start, NULL, NULL);
2743
2744         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2745
2746         if (wait) {
2747                 wait_for_completion(&async->wait);
2748                 ret = async->error;
2749                 kfree(async);
2750                 return ret;
2751         }
2752         return 0;
2753 }
2754
2755 /*
2756  * this starts processing the delayed reference count updates and
2757  * extent insertions we have queued up so far.  count can be
2758  * 0, which means to process everything in the tree at the start
2759  * of the run (but not newly added entries), or it can be some target
2760  * number you'd like to process.
2761  *
2762  * Returns 0 on success or if called with an aborted transaction
2763  * Returns <0 on error and aborts the transaction
2764  */
2765 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2766                            struct btrfs_root *root, unsigned long count)
2767 {
2768         struct rb_node *node;
2769         struct btrfs_delayed_ref_root *delayed_refs;
2770         struct btrfs_delayed_ref_head *head;
2771         int ret;
2772         int run_all = count == (unsigned long)-1;
2773         int run_most = 0;
2774
2775         /* We'll clean this up in btrfs_cleanup_transaction */
2776         if (trans->aborted)
2777                 return 0;
2778
2779         if (root == root->fs_info->extent_root)
2780                 root = root->fs_info->tree_root;
2781
2782         delayed_refs = &trans->transaction->delayed_refs;
2783         if (count == 0) {
2784                 count = atomic_read(&delayed_refs->num_entries) * 2;
2785                 run_most = 1;
2786         }
2787
2788 again:
2789 #ifdef SCRAMBLE_DELAYED_REFS
2790         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2791 #endif
2792         ret = __btrfs_run_delayed_refs(trans, root, count);
2793         if (ret < 0) {
2794                 btrfs_abort_transaction(trans, root, ret);
2795                 return ret;
2796         }
2797
2798         if (run_all) {
2799                 if (!list_empty(&trans->new_bgs))
2800                         btrfs_create_pending_block_groups(trans, root);
2801
2802                 spin_lock(&delayed_refs->lock);
2803                 node = rb_first(&delayed_refs->href_root);
2804                 if (!node) {
2805                         spin_unlock(&delayed_refs->lock);
2806                         goto out;
2807                 }
2808                 count = (unsigned long)-1;
2809
2810                 while (node) {
2811                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2812                                         href_node);
2813                         if (btrfs_delayed_ref_is_head(&head->node)) {
2814                                 struct btrfs_delayed_ref_node *ref;
2815
2816                                 ref = &head->node;
2817                                 atomic_inc(&ref->refs);
2818
2819                                 spin_unlock(&delayed_refs->lock);
2820                                 /*
2821                                  * Mutex was contended, block until it's
2822                                  * released and try again
2823                                  */
2824                                 mutex_lock(&head->mutex);
2825                                 mutex_unlock(&head->mutex);
2826
2827                                 btrfs_put_delayed_ref(ref);
2828                                 cond_resched();
2829                                 goto again;
2830                         } else {
2831                                 WARN_ON(1);
2832                         }
2833                         node = rb_next(node);
2834                 }
2835                 spin_unlock(&delayed_refs->lock);
2836                 cond_resched();
2837                 goto again;
2838         }
2839 out:
2840         ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2841         if (ret)
2842                 return ret;
2843         assert_qgroups_uptodate(trans);
2844         return 0;
2845 }
2846
2847 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2848                                 struct btrfs_root *root,
2849                                 u64 bytenr, u64 num_bytes, u64 flags,
2850                                 int level, int is_data)
2851 {
2852         struct btrfs_delayed_extent_op *extent_op;
2853         int ret;
2854
2855         extent_op = btrfs_alloc_delayed_extent_op();
2856         if (!extent_op)
2857                 return -ENOMEM;
2858
2859         extent_op->flags_to_set = flags;
2860         extent_op->update_flags = 1;
2861         extent_op->update_key = 0;
2862         extent_op->is_data = is_data ? 1 : 0;
2863         extent_op->level = level;
2864
2865         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2866                                           num_bytes, extent_op);
2867         if (ret)
2868                 btrfs_free_delayed_extent_op(extent_op);
2869         return ret;
2870 }
2871
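/*
 * Scan the delayed refs still queued against 'bytenr' for a reference that
 * does not match this root/objectid/offset.  Returns 1 if such a cross
 * reference exists, 0 if not, and -EAGAIN if the ref head mutex was
 * contended and the caller should retry.
 */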
2872 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2873                                       struct btrfs_root *root,
2874                                       struct btrfs_path *path,
2875                                       u64 objectid, u64 offset, u64 bytenr)
2876 {
2877         struct btrfs_delayed_ref_head *head;
2878         struct btrfs_delayed_ref_node *ref;
2879         struct btrfs_delayed_data_ref *data_ref;
2880         struct btrfs_delayed_ref_root *delayed_refs;
2881         struct rb_node *node;
2882         int ret = 0;
2883
2884         delayed_refs = &trans->transaction->delayed_refs;
2885         spin_lock(&delayed_refs->lock);
2886         head = btrfs_find_delayed_ref_head(trans, bytenr);
2887         if (!head) {
2888                 spin_unlock(&delayed_refs->lock);
2889                 return 0;
2890         }
2891
2892         if (!mutex_trylock(&head->mutex)) {
2893                 atomic_inc(&head->node.refs);
2894                 spin_unlock(&delayed_refs->lock);
2895
2896                 btrfs_release_path(path);
2897
2898                 /*
2899                  * Mutex was contended, block until it's released and let
2900                  * caller try again
2901                  */
2902                 mutex_lock(&head->mutex);
2903                 mutex_unlock(&head->mutex);
2904                 btrfs_put_delayed_ref(&head->node);
2905                 return -EAGAIN;
2906         }
2907         spin_unlock(&delayed_refs->lock);
2908
2909         spin_lock(&head->lock);
2910         node = rb_first(&head->ref_root);
2911         while (node) {
2912                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2913                 node = rb_next(node);
2914
2915                 /* If it's a shared ref we know a cross reference exists */
2916                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2917                         ret = 1;
2918                         break;
2919                 }
2920
2921                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2922
2923                 /*
2924                  * If our ref doesn't match the one we're currently looking at
2925                  * then we have a cross reference.
2926                  */
2927                 if (data_ref->root != root->root_key.objectid ||
2928                     data_ref->objectid != objectid ||
2929                     data_ref->offset != offset) {
2930                         ret = 1;
2931                         break;
2932                 }
2933         }
2934         spin_unlock(&head->lock);
2935         mutex_unlock(&head->mutex);
2936         return ret;
2937 }
2938
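/*
 * Ask the same cross-reference question of the committed extent tree:
 * return 0 only when the extent item holds exactly one inline data ref
 * matching this root/objectid/offset and was created after the last
 * snapshot; anything else counts as a possible cross reference.
 */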
2939 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2940                                         struct btrfs_root *root,
2941                                         struct btrfs_path *path,
2942                                         u64 objectid, u64 offset, u64 bytenr)
2943 {
2944         struct btrfs_root *extent_root = root->fs_info->extent_root;
2945         struct extent_buffer *leaf;
2946         struct btrfs_extent_data_ref *ref;
2947         struct btrfs_extent_inline_ref *iref;
2948         struct btrfs_extent_item *ei;
2949         struct btrfs_key key;
2950         u32 item_size;
2951         int ret;
2952
2953         key.objectid = bytenr;
2954         key.offset = (u64)-1;
2955         key.type = BTRFS_EXTENT_ITEM_KEY;
2956
2957         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2958         if (ret < 0)
2959                 goto out;
2960         BUG_ON(ret == 0); /* Corruption */
2961
2962         ret = -ENOENT;
2963         if (path->slots[0] == 0)
2964                 goto out;
2965
2966         path->slots[0]--;
2967         leaf = path->nodes[0];
2968         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2969
2970         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2971                 goto out;
2972
2973         ret = 1;
2974         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2975 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2976         if (item_size < sizeof(*ei)) {
2977                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2978                 goto out;
2979         }
2980 #endif
2981         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2982
2983         if (item_size != sizeof(*ei) +
2984             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2985                 goto out;
2986
2987         if (btrfs_extent_generation(leaf, ei) <=
2988             btrfs_root_last_snapshot(&root->root_item))
2989                 goto out;
2990
2991         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2992         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2993             BTRFS_EXTENT_DATA_REF_KEY)
2994                 goto out;
2995
2996         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2997         if (btrfs_extent_refs(leaf, ei) !=
2998             btrfs_extent_data_ref_count(leaf, ref) ||
2999             btrfs_extent_data_ref_root(leaf, ref) !=
3000             root->root_key.objectid ||
3001             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3002             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3003                 goto out;
3004
3005         ret = 0;
3006 out:
3007         return ret;
3008 }
3009
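/*
 * Combine the two checks above: the extent is considered free of cross
 * references only if both the committed tree and the delayed ref queue
 * agree, retrying the delayed check for as long as it reports contention.
 */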
3010 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3011                           struct btrfs_root *root,
3012                           u64 objectid, u64 offset, u64 bytenr)
3013 {
3014         struct btrfs_path *path;
3015         int ret;
3016         int ret2;
3017
3018         path = btrfs_alloc_path();
3019         if (!path)
3020                 return -ENOMEM;
3021
3022         do {
3023                 ret = check_committed_ref(trans, root, path, objectid,
3024                                           offset, bytenr);
3025                 if (ret && ret != -ENOENT)
3026                         goto out;
3027
3028                 ret2 = check_delayed_ref(trans, root, path, objectid,
3029                                          offset, bytenr);
3030         } while (ret2 == -EAGAIN);
3031
3032         if (ret2 && ret2 != -ENOENT) {
3033                 ret = ret2;
3034                 goto out;
3035         }
3036
3037         if (ret != -ENOENT || ret2 != -ENOENT)
3038                 ret = 0;
3039 out:
3040         btrfs_free_path(path);
3041         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3042                 WARN_ON(ret > 0);
3043         return ret;
3044 }
3045
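/*
 * Walk every pointer in 'buf' and add (inc=1) or drop (inc=0) one reference
 * for each extent it points to: file extent disk ranges for leaves, child
 * tree blocks for nodes.  With full_backref set, the refs are recorded
 * against buf->start as the parent.
 */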
3046 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3047                            struct btrfs_root *root,
3048                            struct extent_buffer *buf,
3049                            int full_backref, int inc)
3050 {
3051         u64 bytenr;
3052         u64 num_bytes;
3053         u64 parent;
3054         u64 ref_root;
3055         u32 nritems;
3056         struct btrfs_key key;
3057         struct btrfs_file_extent_item *fi;
3058         int i;
3059         int level;
3060         int ret = 0;
3061         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3062                             u64, u64, u64, u64, u64, u64, int);
3063
3065         if (btrfs_test_is_dummy_root(root))
3066                 return 0;
3067
3068         ref_root = btrfs_header_owner(buf);
3069         nritems = btrfs_header_nritems(buf);
3070         level = btrfs_header_level(buf);
3071
3072         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3073                 return 0;
3074
3075         if (inc)
3076                 process_func = btrfs_inc_extent_ref;
3077         else
3078                 process_func = btrfs_free_extent;
3079
3080         if (full_backref)
3081                 parent = buf->start;
3082         else
3083                 parent = 0;
3084
3085         for (i = 0; i < nritems; i++) {
3086                 if (level == 0) {
3087                         btrfs_item_key_to_cpu(buf, &key, i);
3088                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3089                                 continue;
3090                         fi = btrfs_item_ptr(buf, i,
3091                                             struct btrfs_file_extent_item);
3092                         if (btrfs_file_extent_type(buf, fi) ==
3093                             BTRFS_FILE_EXTENT_INLINE)
3094                                 continue;
3095                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3096                         if (bytenr == 0)
3097                                 continue;
3098
3099                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3100                         key.offset -= btrfs_file_extent_offset(buf, fi);
3101                         ret = process_func(trans, root, bytenr, num_bytes,
3102                                            parent, ref_root, key.objectid,
3103                                            key.offset, 1);
3104                         if (ret)
3105                                 goto fail;
3106                 } else {
3107                         bytenr = btrfs_node_blockptr(buf, i);
3108                         num_bytes = root->nodesize;
3109                         ret = process_func(trans, root, bytenr, num_bytes,
3110                                            parent, ref_root, level - 1, 0,
3111                                            1);
3112                         if (ret)
3113                                 goto fail;
3114                 }
3115         }
3116         return 0;
3117 fail:
3118         return ret;
3119 }
3120
3121 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3122                   struct extent_buffer *buf, int full_backref)
3123 {
3124         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3125 }
3126
3127 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3128                   struct extent_buffer *buf, int full_backref)
3129 {
3130         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3131 }
3132
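/*
 * Copy the in-memory block group item of 'cache' over its existing item in
 * the extent tree.  The item must already exist; any failure aborts the
 * transaction.
 */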
3133 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3134                                  struct btrfs_root *root,
3135                                  struct btrfs_path *path,
3136                                  struct btrfs_block_group_cache *cache)
3137 {
3138         int ret;
3139         struct btrfs_root *extent_root = root->fs_info->extent_root;
3140         unsigned long bi;
3141         struct extent_buffer *leaf;
3142
3143         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3144         if (ret < 0)
3145                 goto fail;
3146         BUG_ON(ret); /* Corruption */
3147
3148         leaf = path->nodes[0];
3149         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3150         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3151         btrfs_mark_buffer_dirty(leaf);
3152         btrfs_release_path(path);
3153 fail:
3154         if (ret) {
3155                 btrfs_abort_transaction(trans, root, ret);
3156                 return ret;
3157         }
3158         return 0;
3160 }
3161
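/*
 * Advance to the block group following 'cache' in the rbtree, dropping our
 * reference on 'cache' either way.  If the group was removed in the
 * meantime, fall back to a full lookup starting just past it.
 */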
3162 static struct btrfs_block_group_cache *
3163 next_block_group(struct btrfs_root *root,
3164                  struct btrfs_block_group_cache *cache)
3165 {
3166         struct rb_node *node;
3167
3168         spin_lock(&root->fs_info->block_group_cache_lock);
3169
3170         /* If our block group was removed, we need a full search. */
3171         if (RB_EMPTY_NODE(&cache->cache_node)) {
3172                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3173
3174                 spin_unlock(&root->fs_info->block_group_cache_lock);
3175                 btrfs_put_block_group(cache);
3176                 cache = btrfs_lookup_first_block_group(root->fs_info,
3177                                                        next_bytenr);
3178                 return cache;
3179         }
3180         node = rb_next(&cache->cache_node);
3181         btrfs_put_block_group(cache);
3182         if (node) {
3183                 cache = rb_entry(node, struct btrfs_block_group_cache,
3184                                  cache_node);
3185                 btrfs_get_block_group(cache);
3186         } else
3187                 cache = NULL;
3188         spin_unlock(&root->fs_info->block_group_cache_lock);
3189         return cache;
3190 }
3191
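/*
 * Prepare the free space cache inode for this block group so the cache can
 * be written at commit time: create the inode if needed, truncate stale
 * contents and preallocate room for the new cache.  The outcome is recorded
 * in block_group->disk_cache_state.
 */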
3192 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3193                             struct btrfs_trans_handle *trans,
3194                             struct btrfs_path *path)
3195 {
3196         struct btrfs_root *root = block_group->fs_info->tree_root;
3197         struct inode *inode = NULL;
3198         u64 alloc_hint = 0;
3199         int dcs = BTRFS_DC_ERROR;
3200         int num_pages = 0;
3201         int retries = 0;
3202         int ret = 0;
3203
3204         /*
3205          * If this block group is smaller than 100 megs, don't bother
3206          * caching it.
3207          */
3208         if (block_group->key.offset < (100 * 1024 * 1024)) {
3209                 spin_lock(&block_group->lock);
3210                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3211                 spin_unlock(&block_group->lock);
3212                 return 0;
3213         }
3214
3215 again:
3216         inode = lookup_free_space_inode(root, block_group, path);
3217         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3218                 ret = PTR_ERR(inode);
3219                 btrfs_release_path(path);
3220                 goto out;
3221         }
3222
3223         if (IS_ERR(inode)) {
3224                 BUG_ON(retries);
3225                 retries++;
3226
3227                 if (block_group->ro)
3228                         goto out_free;
3229
3230                 ret = create_free_space_inode(root, trans, block_group, path);
3231                 if (ret)
3232                         goto out_free;
3233                 goto again;
3234         }
3235
3236         /* We've already setup this transaction, go ahead and exit */
3237         if (block_group->cache_generation == trans->transid &&
3238             i_size_read(inode)) {
3239                 dcs = BTRFS_DC_SETUP;
3240                 goto out_put;
3241         }
3242
3243         /*
3244          * We want to set the generation to 0, that way if anything goes wrong
3245          * from here on out we know not to trust this cache when we load up next
3246          * time.
3247          */
3248         BTRFS_I(inode)->generation = 0;
3249         ret = btrfs_update_inode(trans, root, inode);
3250         WARN_ON(ret);
3251
3252         if (i_size_read(inode) > 0) {
3253                 ret = btrfs_check_trunc_cache_free_space(root,
3254                                         &root->fs_info->global_block_rsv);
3255                 if (ret)
3256                         goto out_put;
3257
3258                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3259                 if (ret)
3260                         goto out_put;
3261         }
3262
3263         spin_lock(&block_group->lock);
3264         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3265             !btrfs_test_opt(root, SPACE_CACHE) ||
3266             block_group->delalloc_bytes) {
3267                 /*
3268                  * Don't bother trying to write stuff out _if_ a) we're
3269                  * not cached, b) we're mounted with the nospace_cache
3270                  * option, or c) the block group still has delalloc bytes.
3271                  */
3272                 dcs = BTRFS_DC_WRITTEN;
3273                 spin_unlock(&block_group->lock);
3274                 goto out_put;
3275         }
3276         spin_unlock(&block_group->lock);
3277
3278         /*
3279          * Try to preallocate enough space based on how big the block group is.
3280          * Keep in mind this has to include any pinned space which could end up
3281          * taking up quite a bit since it's not folded into the other space
3282          * cache.
3283          */
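        /*
         * Worked example, assuming 4K pages: a 1GiB block group gives
         * div64_u64(1GiB, 256MiB) = 4, so 4 * 16 = 64 pages and
         * 64 * 4096 = 256KiB preallocated for the cache file.
         */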
3284         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3285         if (!num_pages)
3286                 num_pages = 1;
3287
3288         num_pages *= 16;
3289         num_pages *= PAGE_CACHE_SIZE;
3290
3291         ret = btrfs_check_data_free_space(inode, num_pages);
3292         if (ret)
3293                 goto out_put;
3294
3295         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3296                                               num_pages, num_pages,
3297                                               &alloc_hint);
3298         if (!ret)
3299                 dcs = BTRFS_DC_SETUP;
3300         btrfs_free_reserved_data_space(inode, num_pages);
3301
3302 out_put:
3303         iput(inode);
3304 out_free:
3305         btrfs_release_path(path);
3306 out:
3307         spin_lock(&block_group->lock);
3308         if (!ret && dcs == BTRFS_DC_SETUP)
3309                 block_group->cache_generation = trans->transid;
3310         block_group->disk_cache_state = dcs;
3311         spin_unlock(&block_group->lock);
3312
3313         return ret;
3314 }
3315
3316 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3317                                    struct btrfs_root *root)
3318 {
3319         struct btrfs_block_group_cache *cache;
3320         int err = 0;
3321         struct btrfs_path *path;
3322         u64 last = 0;
3323
3324         path = btrfs_alloc_path();
3325         if (!path)
3326                 return -ENOMEM;
3327
3328 again:
3329         while (1) {
3330                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3331                 while (cache) {
3332                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3333                                 break;
3334                         cache = next_block_group(root, cache);
3335                 }
3336                 if (!cache) {
3337                         if (last == 0)
3338                                 break;
3339                         last = 0;
3340                         continue;
3341                 }
3342                 err = cache_save_setup(cache, trans, path);
3343                 last = cache->key.objectid + cache->key.offset;
3344                 btrfs_put_block_group(cache);
3345         }
3346
3347         while (1) {
3348                 if (last == 0) {
3349                         err = btrfs_run_delayed_refs(trans, root,
3350                                                      (unsigned long)-1);
3351                         if (err) /* File system offline */
3352                                 goto out;
3353                 }
3354
3355                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3356                 while (cache) {
3357                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3358                                 btrfs_put_block_group(cache);
3359                                 goto again;
3360                         }
3361
3362                         if (cache->dirty)
3363                                 break;
3364                         cache = next_block_group(root, cache);
3365                 }
3366                 if (!cache) {
3367                         if (last == 0)
3368                                 break;
3369                         last = 0;
3370                         continue;
3371                 }
3372
3373                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3374                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3375                 cache->dirty = 0;
3376                 last = cache->key.objectid + cache->key.offset;
3377
3378                 err = write_one_cache_group(trans, root, path, cache);
3379                 btrfs_put_block_group(cache);
3380                 if (err) /* File system offline */
3381                         goto out;
3382         }
3383
3384         while (1) {
3385                 /*
3386                  * I don't think this is needed since we're just marking our
3387                  * preallocated extent as written, but it can't hurt to run
3388                  * them just in case.
3389                  */
3390                 if (last == 0) {
3391                         err = btrfs_run_delayed_refs(trans, root,
3392                                                      (unsigned long)-1);
3393                         if (err) /* File system offline */
3394                                 goto out;
3395                 }
3396
3397                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3398                 while (cache) {
3399                         /*
3400                          * Really this shouldn't happen, but it could if we
3401                          * couldn't write the entire preallocated extent and
3402                          * splitting the extent resulted in a new block.
3403                          */
3404                         if (cache->dirty) {
3405                                 btrfs_put_block_group(cache);
3406                                 goto again;
3407                         }
3408                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3409                                 break;
3410                         cache = next_block_group(root, cache);
3411                 }
3412                 if (!cache) {
3413                         if (last == 0)
3414                                 break;
3415                         last = 0;
3416                         continue;
3417                 }
3418
3419                 err = btrfs_write_out_cache(root, trans, cache, path);
3420
3421                 /*
3422                  * If we didn't have an error then the cache state is still
3423                  * NEED_WRITE, so we can set it to WRITTEN.
3424                  */
3425                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3426                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3427                 last = cache->key.objectid + cache->key.offset;
3428                 btrfs_put_block_group(cache);
3429         }
3430 out:
3432         btrfs_free_path(path);
3433         return err;
3434 }
3435
3436 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3437 {
3438         struct btrfs_block_group_cache *block_group;
3439         int readonly = 0;
3440
3441         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3442         if (!block_group || block_group->ro)
3443                 readonly = 1;
3444         if (block_group)
3445                 btrfs_put_block_group(block_group);
3446         return readonly;
3447 }
3448
3449 static const char *alloc_name(u64 flags)
3450 {
3451         switch (flags) {
3452         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3453                 return "mixed";
3454         case BTRFS_BLOCK_GROUP_METADATA:
3455                 return "metadata";
3456         case BTRFS_BLOCK_GROUP_DATA:
3457                 return "data";
3458         case BTRFS_BLOCK_GROUP_SYSTEM:
3459                 return "system";
3460         default:
3461                 WARN_ON(1);
3462                 return "invalid-combination";
3463         }
3464 }
3465
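/*
 * Fold the new totals into the existing space_info for 'flags', or allocate
 * and register a fresh one (including its sysfs kobject) for the first block
 * group of a given type.  'factor' doubles the on-disk accounting for
 * profiles that mirror every byte (DUP/RAID1/RAID10).
 */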
3466 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3467                              u64 total_bytes, u64 bytes_used,
3468                              struct btrfs_space_info **space_info)
3469 {
3470         struct btrfs_space_info *found;
3471         int i;
3472         int factor;
3473         int ret;
3474
3475         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3476                      BTRFS_BLOCK_GROUP_RAID10))
3477                 factor = 2;
3478         else
3479                 factor = 1;
3480
3481         found = __find_space_info(info, flags);
3482         if (found) {
3483                 spin_lock(&found->lock);
3484                 found->total_bytes += total_bytes;
3485                 found->disk_total += total_bytes * factor;
3486                 found->bytes_used += bytes_used;
3487                 found->disk_used += bytes_used * factor;
3488                 found->full = 0;
3489                 spin_unlock(&found->lock);
3490                 *space_info = found;
3491                 return 0;
3492         }
3493         found = kzalloc(sizeof(*found), GFP_NOFS);
3494         if (!found)
3495                 return -ENOMEM;
3496
3497         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3498         if (ret) {
3499                 kfree(found);
3500                 return ret;
3501         }
3502
3503         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3504                 INIT_LIST_HEAD(&found->block_groups[i]);
3505         init_rwsem(&found->groups_sem);
3506         spin_lock_init(&found->lock);
3507         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3508         found->total_bytes = total_bytes;
3509         found->disk_total = total_bytes * factor;
3510         found->bytes_used = bytes_used;
3511         found->disk_used = bytes_used * factor;
3512         found->bytes_pinned = 0;
3513         found->bytes_reserved = 0;
3514         found->bytes_readonly = 0;
3515         found->bytes_may_use = 0;
3516         found->full = 0;
3517         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3518         found->chunk_alloc = 0;
3519         found->flush = 0;
3520         init_waitqueue_head(&found->wait);
3521         INIT_LIST_HEAD(&found->ro_bgs);
3522
3523         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3524                                     info->space_info_kobj, "%s",
3525                                     alloc_name(found->flags));
3526         if (ret) {
3527                 kfree(found);
3528                 return ret;
3529         }
3530
3531         *space_info = found;
3532         list_add_rcu(&found->list, &info->space_info);
3533         if (flags & BTRFS_BLOCK_GROUP_DATA)
3534                 info->data_sinfo = found;
3535
3536         return ret;
3537 }
3538
3539 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3540 {
3541         u64 extra_flags = chunk_to_extended(flags) &
3542                                 BTRFS_EXTENDED_PROFILE_MASK;
3543
3544         write_seqlock(&fs_info->profiles_lock);
3545         if (flags & BTRFS_BLOCK_GROUP_DATA)
3546                 fs_info->avail_data_alloc_bits |= extra_flags;
3547         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3548                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3549         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3550                 fs_info->avail_system_alloc_bits |= extra_flags;
3551         write_sequnlock(&fs_info->profiles_lock);
3552 }
3553
3554 /*
3555  * returns target flags in extended format or 0 if restripe for this
3556  * chunk_type is not in progress
3557  *
3558  * should be called with either volume_mutex or balance_lock held
3559  */
3560 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3561 {
3562         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3563         u64 target = 0;
3564
3565         if (!bctl)
3566                 return 0;
3567
3568         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3569             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3570                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3571         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3572                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3573                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3574         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3575                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3576                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3577         }
3578
3579         return target;
3580 }
3581
3582 /*
3583  * @flags: available profiles in extended format (see ctree.h)
3584  *
3585  * Returns reduced profile in chunk format.  If profile changing is in
3586  * progress (either running or paused) picks the target profile (if it's
3587  * already available), otherwise falls back to plain reducing.
3588  */
3589 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3590 {
3591         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3592         u64 target;
3593         u64 tmp;
3594
3595         /*
3596          * See if restripe for this chunk_type is in progress; if so,
3597          * try to reduce to the target profile.
3598          */
3599         spin_lock(&root->fs_info->balance_lock);
3600         target = get_restripe_target(root->fs_info, flags);
3601         if (target) {
3602                 /* pick target profile only if it's already available */
3603                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3604                         spin_unlock(&root->fs_info->balance_lock);
3605                         return extended_to_chunk(target);
3606                 }
3607         }
3608         spin_unlock(&root->fs_info->balance_lock);
3609
3610         /* First, mask out the RAID levels which aren't possible */
3611         if (num_devices == 1)
3612                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3613                            BTRFS_BLOCK_GROUP_RAID5);
3614         if (num_devices < 3)
3615                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3616         if (num_devices < 4)
3617                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3618
3619         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3620                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3621                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3622         flags &= ~tmp;
3623
3624         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3625                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3626         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3627                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3628         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3629                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3630         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3631                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3632         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3633                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3634
3635         return extended_to_chunk(flags | tmp);
3636 }
3637
3638 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3639 {
3640         unsigned seq;
3641         u64 flags;
3642
3643         do {
3644                 flags = orig_flags;
3645                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3646
3647                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3648                         flags |= root->fs_info->avail_data_alloc_bits;
3649                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3650                         flags |= root->fs_info->avail_system_alloc_bits;
3651                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3652                         flags |= root->fs_info->avail_metadata_alloc_bits;
3653         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3654
3655         return btrfs_reduce_alloc_profile(root, flags);
3656 }
3657
3658 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3659 {
3660         u64 flags;
3661         u64 ret;
3662
3663         if (data)
3664                 flags = BTRFS_BLOCK_GROUP_DATA;
3665         else if (root == root->fs_info->chunk_root)
3666                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3667         else
3668                 flags = BTRFS_BLOCK_GROUP_METADATA;
3669
3670         ret = get_alloc_profile(root, flags);
3671         return ret;
3672 }
3673
3674 /*
3675  * This will check the space that the inode allocates from to make sure we have
3676  * enough space for bytes.
3677  */
3678 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3679 {
3680         struct btrfs_space_info *data_sinfo;
3681         struct btrfs_root *root = BTRFS_I(inode)->root;
3682         struct btrfs_fs_info *fs_info = root->fs_info;
3683         u64 used;
3684         int ret = 0, committed = 0, alloc_chunk = 1;
3685
3686         /* make sure bytes are sectorsize aligned */
3687         bytes = ALIGN(bytes, root->sectorsize);
3688
3689         if (btrfs_is_free_space_inode(inode)) {
3690                 committed = 1;
3691                 ASSERT(current->journal_info);
3692         }
3693
3694         data_sinfo = fs_info->data_sinfo;
3695         if (!data_sinfo)
3696                 goto alloc;
3697
3698 again:
3699         /* make sure we have enough space to handle the data first */
3700         spin_lock(&data_sinfo->lock);
3701         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3702                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3703                 data_sinfo->bytes_may_use;
3704
3705         if (used + bytes > data_sinfo->total_bytes) {
3706                 struct btrfs_trans_handle *trans;
3707
3708                 /*
3709                  * if we don't have enough free bytes in this space then we need
3710                  * to alloc a new chunk.
3711                  */
3712                 if (!data_sinfo->full && alloc_chunk) {
3713                         u64 alloc_target;
3714
3715                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3716                         spin_unlock(&data_sinfo->lock);
3717 alloc:
3718                         alloc_target = btrfs_get_alloc_profile(root, 1);
3719                         /*
3720                          * It is ugly that we don't call a nolock join
3721                          * transaction for the free space inode case here.
3722                          * But it is safe because we only do the data space
3723                          * reservation for the free space cache in the
3724                          * transaction context: the common join transaction
3725                          * just increases the use count of the current
3726                          * transaction handle and doesn't try to acquire
3727                          * the fs trans_lock.
3728                          */
3729                         trans = btrfs_join_transaction(root);
3730                         if (IS_ERR(trans))
3731                                 return PTR_ERR(trans);
3732
3733                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3734                                              alloc_target,
3735                                              CHUNK_ALLOC_NO_FORCE);
3736                         btrfs_end_transaction(trans, root);
3737                         if (ret < 0) {
3738                                 if (ret != -ENOSPC)
3739                                         return ret;
3740                                 else
3741                                         goto commit_trans;
3742                         }
3743
3744                         if (!data_sinfo)
3745                                 data_sinfo = fs_info->data_sinfo;
3746
3747                         goto again;
3748                 }
3749
3750                 /*
3751                  * If we don't have enough pinned space to deal with this
3752                  * allocation don't bother committing the transaction.
3753                  */
3754                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3755                                            bytes) < 0)
3756                         committed = 1;
3757                 spin_unlock(&data_sinfo->lock);
3758
3759                 /* commit the current transaction and try again */
3760 commit_trans:
3761                 if (!committed &&
3762                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3763                         committed = 1;
3764
3765                         trans = btrfs_join_transaction(root);
3766                         if (IS_ERR(trans))
3767                                 return PTR_ERR(trans);
3768                         ret = btrfs_commit_transaction(trans, root);
3769                         if (ret)
3770                                 return ret;
3771                         goto again;
3772                 }
3773
3774                 trace_btrfs_space_reservation(root->fs_info,
3775                                               "space_info:enospc",
3776                                               data_sinfo->flags, bytes, 1);
3777                 return -ENOSPC;
3778         }
3779         data_sinfo->bytes_may_use += bytes;
3780         trace_btrfs_space_reservation(root->fs_info, "space_info",
3781                                       data_sinfo->flags, bytes, 1);
3782         spin_unlock(&data_sinfo->lock);
3783
3784         return 0;
3785 }
3786
3787 /*
3788  * Called if we need to clear a data reservation for this inode.
3789  */
3790 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3791 {
3792         struct btrfs_root *root = BTRFS_I(inode)->root;
3793         struct btrfs_space_info *data_sinfo;
3794
3795         /* make sure bytes are sectorsize aligned */
3796         bytes = ALIGN(bytes, root->sectorsize);
3797
3798         data_sinfo = root->fs_info->data_sinfo;
3799         spin_lock(&data_sinfo->lock);
3800         WARN_ON(data_sinfo->bytes_may_use < bytes);
3801         data_sinfo->bytes_may_use -= bytes;
3802         trace_btrfs_space_reservation(root->fs_info, "space_info",
3803                                       data_sinfo->flags, bytes, 0);
3804         spin_unlock(&data_sinfo->lock);
3805 }
3806
3807 static void force_metadata_allocation(struct btrfs_fs_info *info)
3808 {
3809         struct list_head *head = &info->space_info;
3810         struct btrfs_space_info *found;
3811
3812         rcu_read_lock();
3813         list_for_each_entry_rcu(found, head, list) {
3814                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3815                         found->force_alloc = CHUNK_ALLOC_FORCE;
3816         }
3817         rcu_read_unlock();
3818 }
3819
3820 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3821 {
3822         return (global->size << 1);
3823 }
3824
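/*
 * Decide whether allocating another chunk for 'sinfo' is worthwhile.
 * CHUNK_ALLOC_FORCE always says yes; CHUNK_ALLOC_LIMITED keeps roughly 1%
 * of the filesystem free; otherwise we only allocate once about 80% of the
 * existing space of this type (global reserve included for metadata) is in
 * use.
 */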
3825 static int should_alloc_chunk(struct btrfs_root *root,
3826                               struct btrfs_space_info *sinfo, int force)
3827 {
3828         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3829         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3830         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3831         u64 thresh;
3832
3833         if (force == CHUNK_ALLOC_FORCE)
3834                 return 1;
3835
3836         /*
3837          * We need to take into account the global rsv because for all intents
3838          * and purposes it's used space.  Don't worry about locking the
3839          * global_rsv, it doesn't change except when the transaction commits.
3840          */
3841         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3842                 num_allocated += calc_global_rsv_need_space(global_rsv);
3843
3844         /*
3845          * in limited mode, we want to have some free space up to
3846          * about 1% of the FS size.
3847          */
3848         if (force == CHUNK_ALLOC_LIMITED) {
3849                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3850                 thresh = max_t(u64, 64 * 1024 * 1024,
3851                                div_factor_fine(thresh, 1));
3852
3853                 if (num_bytes - num_allocated < thresh)
3854                         return 1;
3855         }
3856
3857         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3858                 return 0;
3859         return 1;
3860 }
3861
3862 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3863 {
3864         u64 num_dev;
3865
3866         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3867                     BTRFS_BLOCK_GROUP_RAID0 |
3868                     BTRFS_BLOCK_GROUP_RAID5 |
3869                     BTRFS_BLOCK_GROUP_RAID6))
3870                 num_dev = root->fs_info->fs_devices->rw_devices;
3871         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3872                 num_dev = 2;
3873         else
3874                 num_dev = 1;    /* DUP or single */
3875
3876         /* metadata for updating devices and the chunk tree */
3877         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3878 }
3879
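/*
 * Allocating a chunk costs SYSTEM space for the device and chunk tree
 * updates.  Warn (with ENOSPC_DEBUG) and allocate a new SYSTEM chunk up
 * front if we are below the threshold.
 */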
3880 static void check_system_chunk(struct btrfs_trans_handle *trans,
3881                                struct btrfs_root *root, u64 type)
3882 {
3883         struct btrfs_space_info *info;
3884         u64 left;
3885         u64 thresh;
3886
3887         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3888         spin_lock(&info->lock);
3889         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3890                 info->bytes_reserved - info->bytes_readonly;
3891         spin_unlock(&info->lock);
3892
3893         thresh = get_system_chunk_thresh(root, type);
3894         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3895                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3896                         left, thresh, type);
3897                 dump_space_info(info, 0, 0);
3898         }
3899
3900         if (left < thresh) {
3901                 u64 flags;
3902
3903                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3904                 btrfs_alloc_chunk(trans, root, flags);
3905         }
3906 }
3907
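/*
 * Allocate a new chunk of the given type if should_alloc_chunk() agrees.
 * Concurrent callers serialize on space_info->chunk_alloc and chunk_mutex;
 * a waiter rechecks from the top since the other allocation may already
 * have satisfied it.  Returns 1 if a chunk was allocated, 0 if none was
 * needed, and a negative errno (-ENOSPC once the devices are full) on
 * failure.
 */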
3908 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3909                           struct btrfs_root *extent_root, u64 flags, int force)
3910 {
3911         struct btrfs_space_info *space_info;
3912         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3913         int wait_for_alloc = 0;
3914         int ret = 0;
3915
3916         /* Don't re-enter if we're already allocating a chunk */
3917         if (trans->allocating_chunk)
3918                 return -ENOSPC;
3919
3920         space_info = __find_space_info(extent_root->fs_info, flags);
3921         if (!space_info) {
3922                 ret = update_space_info(extent_root->fs_info, flags,
3923                                         0, 0, &space_info);
3924                 BUG_ON(ret); /* -ENOMEM */
3925         }
3926         BUG_ON(!space_info); /* Logic error */
3927
3928 again:
3929         spin_lock(&space_info->lock);
3930         if (force < space_info->force_alloc)
3931                 force = space_info->force_alloc;
3932         if (space_info->full) {
3933                 if (should_alloc_chunk(extent_root, space_info, force))
3934                         ret = -ENOSPC;
3935                 else
3936                         ret = 0;
3937                 spin_unlock(&space_info->lock);
3938                 return ret;
3939         }
3940
3941         if (!should_alloc_chunk(extent_root, space_info, force)) {
3942                 spin_unlock(&space_info->lock);
3943                 return 0;
3944         } else if (space_info->chunk_alloc) {
3945                 wait_for_alloc = 1;
3946         } else {
3947                 space_info->chunk_alloc = 1;
3948         }
3949
3950         spin_unlock(&space_info->lock);
3951
3952         mutex_lock(&fs_info->chunk_mutex);
3953
3954         /*
3955          * The chunk_mutex is held throughout the entirety of a chunk
3956          * allocation, so once we've acquired the chunk_mutex we know that the
3957          * other guy is done and we need to recheck and see if we should
3958          * allocate.
3959          */
3960         if (wait_for_alloc) {
3961                 mutex_unlock(&fs_info->chunk_mutex);
3962                 wait_for_alloc = 0;
3963                 goto again;
3964         }
3965
3966         trans->allocating_chunk = true;
3967
3968         /*
3969          * If we have mixed data/metadata chunks we want to make sure we keep
3970          * allocating mixed chunks instead of individual chunks.
3971          */
3972         if (btrfs_mixed_space_info(space_info))
3973                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3974
3975         /*
3976          * if we're doing a data chunk, go ahead and make sure that
3977          * we keep a reasonable number of metadata chunks allocated in the
3978          * FS as well.
3979          */
3980         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3981                 fs_info->data_chunk_allocations++;
3982                 if (!(fs_info->data_chunk_allocations %
3983                       fs_info->metadata_ratio))
3984                         force_metadata_allocation(fs_info);
3985         }
3986
3987         /*
3988          * Check if we have enough space in SYSTEM chunk because we may need
3989          * to update devices.
3990          */
3991         check_system_chunk(trans, extent_root, flags);
3992
3993         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3994         trans->allocating_chunk = false;
3995
3996         spin_lock(&space_info->lock);
3997         if (ret < 0 && ret != -ENOSPC)
3998                 goto out;
3999         if (ret)
4000                 space_info->full = 1;
4001         else
4002                 ret = 1;
4003
4004         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4005 out:
4006         space_info->chunk_alloc = 0;
4007         spin_unlock(&space_info->lock);
4008         mutex_unlock(&fs_info->chunk_mutex);
4009         return ret;
4010 }
4011
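/*
 * Decide whether a reservation of 'bytes' may exceed the space currently
 * allocated, on the theory that unallocated device space can still back it.
 * The allowance is halved for mirrored profiles and scaled down by how much
 * flushing the caller is permitted to do.
 */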
4012 static int can_overcommit(struct btrfs_root *root,
4013                           struct btrfs_space_info *space_info, u64 bytes,
4014                           enum btrfs_reserve_flush_enum flush)
4015 {
4016         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4017         u64 profile = btrfs_get_alloc_profile(root, 0);
4018         u64 space_size;
4019         u64 avail;
4020         u64 used;
4021
4022         used = space_info->bytes_used + space_info->bytes_reserved +
4023                 space_info->bytes_pinned + space_info->bytes_readonly;
4024
4025         /*
4026          * We only want to allow over committing if we have lots of actual space
4027          * free, but if we don't have enough space to handle the global reserve
4028          * space then we could end up having a real enospc problem when trying
4029          * to allocate a chunk or some other such important allocation.
4030          */
4031         spin_lock(&global_rsv->lock);
4032         space_size = calc_global_rsv_need_space(global_rsv);
4033         spin_unlock(&global_rsv->lock);
4034         if (used + space_size >= space_info->total_bytes)
4035                 return 0;
4036
4037         used += space_info->bytes_may_use;
4038
4039         spin_lock(&root->fs_info->free_chunk_lock);
4040         avail = root->fs_info->free_chunk_space;
4041         spin_unlock(&root->fs_info->free_chunk_lock);
4042
4043         /*
4044          * If we have dup, raid1 or raid10 then only half of the free
4045          * space is actually usable.  For raid56, the space info used
4046          * doesn't include the parity drive, so we don't have to
4047          * change the math.
4048          */
4049         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4050                        BTRFS_BLOCK_GROUP_RAID1 |
4051                        BTRFS_BLOCK_GROUP_RAID10))
4052                 avail >>= 1;
4053
4054         /*
4055          * If we aren't flushing all things, let us overcommit up to
4056          * 1/2 of the space.  If we can flush, don't let us overcommit
4057          * too much; let it overcommit up to 1/8 of the space.
4058          */
4059         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4060                 avail >>= 3;
4061         else
4062                 avail >>= 1;
4063
4064         if (used + bytes < space_info->total_bytes + avail)
4065                 return 1;
4066         return 0;
4067 }
4068
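/*
 * Kick writeback of delalloc pages so their reservations can be reclaimed.
 * If someone else holds ->s_umount, fall back to flushing the delalloc
 * roots directly and, unless we hold a transaction, waiting on ordered
 * extents.
 */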
4069 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4070                                          unsigned long nr_pages, int nr_items)
4071 {
4072         struct super_block *sb = root->fs_info->sb;
4073
4074         if (down_read_trylock(&sb->s_umount)) {
4075                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4076                 up_read(&sb->s_umount);
4077         } else {
4078                 /*
4079                  * We needn't worry about the filesystem going from r/w to r/o
4080                  * even though we don't acquire the ->s_umount semaphore,
4081                  * because the filesystem should guarantee the delalloc inode
4082                  * list is empty after the filesystem becomes read-only (all
4083                  * dirty pages have been written to disk).
4084                  */
4085                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4086                 if (!current->journal_info)
4087                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4088         }
4089 }
4090
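/*
 * Convert a byte amount we want to reclaim into a number of metadata items,
 * using the per-item transaction metadata size; always at least one item.
 */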
4091 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4092 {
4093         u64 bytes;
4094         int nr;
4095
4096         bytes = btrfs_calc_trans_metadata_size(root, 1);
4097         nr = (int)div64_u64(to_reclaim, bytes);
4098         if (!nr)
4099                 nr = 1;
4100         return nr;
4101 }
4102
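/* Assume flushing this much delalloc frees one item's worth of metadata. */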
4103 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4104
4105 /*
4106  * shrink metadata reservation for delalloc
4107  */
4108 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4109                             bool wait_ordered)
4110 {
4111         struct btrfs_block_rsv *block_rsv;
4112         struct btrfs_space_info *space_info;
4113         struct btrfs_trans_handle *trans;
4114         u64 delalloc_bytes;
4115         u64 max_reclaim;
4116         long time_left;
4117         unsigned long nr_pages;
4118         int loops;
4119         int items;
4120         enum btrfs_reserve_flush_enum flush;
4121
4122         /* Calc the number of items we need to flush for this space reservation */
4123         items = calc_reclaim_items_nr(root, to_reclaim);
4124         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4125
4126         trans = (struct btrfs_trans_handle *)current->journal_info;
4127         block_rsv = &root->fs_info->delalloc_block_rsv;
4128         space_info = block_rsv->space_info;
4129
4130         delalloc_bytes = percpu_counter_sum_positive(
4131                                                 &root->fs_info->delalloc_bytes);
4132         if (delalloc_bytes == 0) {
4133                 if (trans)
4134                         return;
4135                 if (wait_ordered)
4136                         btrfs_wait_ordered_roots(root->fs_info, items);
4137                 return;
4138         }
4139
4140         loops = 0;
4141         while (delalloc_bytes && loops < 3) {
4142                 max_reclaim = min(delalloc_bytes, to_reclaim);
4143                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4144                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4145                 /*
4146                  * We need to wait for the async pages to actually start before
4147                  * we do anything.
4148                  */
4149                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4150                 if (!max_reclaim)
4151                         goto skip_async;
4152
4153                 if (max_reclaim <= nr_pages)
4154                         max_reclaim = 0;
4155                 else
4156                         max_reclaim -= nr_pages;
4157
4158                 wait_event(root->fs_info->async_submit_wait,
4159                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4160                            (int)max_reclaim);
4161 skip_async:
4162                 if (!trans)
4163                         flush = BTRFS_RESERVE_FLUSH_ALL;
4164                 else
4165                         flush = BTRFS_RESERVE_NO_FLUSH;
4166                 spin_lock(&space_info->lock);
4167                 if (can_overcommit(root, space_info, orig, flush)) {
4168                         spin_unlock(&space_info->lock);
4169                         break;
4170                 }
4171                 spin_unlock(&space_info->lock);
4172
4173                 loops++;
4174                 if (wait_ordered && !trans) {
4175                         btrfs_wait_ordered_roots(root->fs_info, items);
4176                 } else {
4177                         time_left = schedule_timeout_killable(1);
4178                         if (time_left)
4179                                 break;
4180                 }
4181                 delalloc_bytes = percpu_counter_sum_positive(
4182                                                 &root->fs_info->delalloc_bytes);
4183         }
4184 }
4185
4186 /**
4187  * may_commit_transaction - possibly commit the transaction if it's ok to
4188  * @root - the root we're allocating for
4189  * @bytes - the number of bytes we want to reserve
4190  * @force - force the commit
4191  *
4192  * This will check to make sure that committing the transaction will actually
4193  * get us somewhere and then commit the transaction if it does.  Otherwise it
4194  * will return -ENOSPC.
4195  */
4196 static int may_commit_transaction(struct btrfs_root *root,
4197                                   struct btrfs_space_info *space_info,
4198                                   u64 bytes, int force)
4199 {
4200         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4201         struct btrfs_trans_handle *trans;
4202
4203         trans = (struct btrfs_trans_handle *)current->journal_info;
4204         if (trans)
4205                 return -EAGAIN;
4206
4207         if (force)
4208                 goto commit;
4209
4210         /* See if there is enough pinned space to make this reservation */
4211         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4212                                    bytes) >= 0)
4213                 goto commit;
4214
4215         /*
4216          * See if there is some space in the delayed insertion reservation for
4217          * this reservation.
4218          */
4219         if (space_info != delayed_rsv->space_info)
4220                 return -ENOSPC;
4221
4222         spin_lock(&delayed_rsv->lock);
4223         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4224                                    bytes - delayed_rsv->size) >= 0) {
4225                 spin_unlock(&delayed_rsv->lock);
4226                 return -ENOSPC;
4227         }
4228         spin_unlock(&delayed_rsv->lock);
4229
4230 commit:
4231         trans = btrfs_join_transaction(root);
4232         if (IS_ERR(trans))
4233                 return -ENOSPC;
4234
4235         return btrfs_commit_transaction(trans, root);
4236 }
4237
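/* The flush states, in increasing order of expense, that flush_space() walks. */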
4238 enum flush_state {
4239         FLUSH_DELAYED_ITEMS_NR  =       1,
4240         FLUSH_DELAYED_ITEMS     =       2,
4241         FLUSH_DELALLOC          =       3,
4242         FLUSH_DELALLOC_WAIT     =       4,
4243         ALLOC_CHUNK             =       5,
4244         COMMIT_TRANS            =       6,
4245 };
4246
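/*
 * Run a single flush state to try to free up space: flush delayed items,
 * flush (and optionally wait on) delalloc, allocate a new chunk, or commit
 * the transaction.  Callers walk the states in order until their
 * reservation succeeds or the states are exhausted.
 */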
4247 static int flush_space(struct btrfs_root *root,
4248                        struct btrfs_space_info *space_info, u64 num_bytes,
4249                        u64 orig_bytes, int state)
4250 {
4251         struct btrfs_trans_handle *trans;
4252         int nr;
4253         int ret = 0;
4254
4255         switch (state) {
4256         case FLUSH_DELAYED_ITEMS_NR:
4257         case FLUSH_DELAYED_ITEMS:
4258                 if (state == FLUSH_DELAYED_ITEMS_NR)
4259                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4260                 else
4261                         nr = -1;
4262
4263                 trans = btrfs_join_transaction(root);
4264                 if (IS_ERR(trans)) {
4265                         ret = PTR_ERR(trans);
4266                         break;
4267                 }
4268                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4269                 btrfs_end_transaction(trans, root);
4270                 break;
4271         case FLUSH_DELALLOC:
4272         case FLUSH_DELALLOC_WAIT:
4273                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4274                                 state == FLUSH_DELALLOC_WAIT);
4275                 break;
4276         case ALLOC_CHUNK:
4277                 trans = btrfs_join_transaction(root);
4278                 if (IS_ERR(trans)) {
4279                         ret = PTR_ERR(trans);
4280                         break;
4281                 }
4282                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4283                                      btrfs_get_alloc_profile(root, 0),
4284                                      CHUNK_ALLOC_NO_FORCE);
4285                 btrfs_end_transaction(trans, root);
4286                 if (ret == -ENOSPC)
4287                         ret = 0;
4288                 break;
4289         case COMMIT_TRANS:
4290                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4291                 break;
4292         default:
4293                 ret = -ENOSPC;
4294                 break;
4295         }
4296
4297         return ret;
4298 }
4299
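/*
 * Estimate how much metadata we should try to reclaim: nothing if we can
 * still overcommit, otherwise the amount used above roughly 90-95% of the
 * total, clamped to what is actually reserved or may be used.
 */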
4300 static inline u64
4301 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4302                                  struct btrfs_space_info *space_info)
4303 {
4304         u64 used;
4305         u64 expected;
4306         u64 to_reclaim;
4307
4308         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4309                                 16 * 1024 * 1024);
4310         spin_lock(&space_info->lock);
4311         if (can_overcommit(root, space_info, to_reclaim,
4312                            BTRFS_RESERVE_FLUSH_ALL)) {
4313                 to_reclaim = 0;
4314                 goto out;
4315         }
4316
4317         used = space_info->bytes_used + space_info->bytes_reserved +
4318                space_info->bytes_pinned + space_info->bytes_readonly +
4319                space_info->bytes_may_use;
4320         if (can_overcommit(root, space_info, 1024 * 1024,
4321                            BTRFS_RESERVE_FLUSH_ALL))
4322                 expected = div_factor_fine(space_info->total_bytes, 95);
4323         else
4324                 expected = div_factor_fine(space_info->total_bytes, 90);
4325
4326         if (used > expected)
4327                 to_reclaim = used - expected;
4328         else
4329                 to_reclaim = 0;
4330         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4331                                      space_info->bytes_reserved);
4332 out:
4333         spin_unlock(&space_info->lock);
4334
4335         return to_reclaim;
4336 }
4337
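/*
 * Async reclaim is warranted once 98% of the space is used, unless the
 * filesystem is closing or being remounted.
 */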
4338 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4339                                         struct btrfs_fs_info *fs_info, u64 used)
4340 {
4341         return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4342                 !btrfs_fs_closing(fs_info) &&
4343                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4344 }
4345
4346 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4347                                        struct btrfs_fs_info *fs_info,
4348                                        int flush_state)
4349 {
4350         u64 used;
4351
4352         spin_lock(&space_info->lock);
4353         /*
4354          * We ran out of space and have not gotten any free space via
4355          * flush_space, so don't bother doing async reclaim.
4356          */
4357         if (flush_state > COMMIT_TRANS && space_info->full) {
4358                 spin_unlock(&space_info->lock);
4359                 return 0;
4360         }
4361
4362         used = space_info->bytes_used + space_info->bytes_reserved +
4363                space_info->bytes_pinned + space_info->bytes_readonly +
4364                space_info->bytes_may_use;
4365         if (need_do_async_reclaim(space_info, fs_info, used)) {
4366                 spin_unlock(&space_info->lock);
4367                 return 1;
4368         }
4369         spin_unlock(&space_info->lock);
4370
4371         return 0;
4372 }
4373
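/*
 * Background worker that walks the flush states to reclaim metadata space,
 * requeueing itself for another pass if reclaim is still needed afterwards.
 */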
4374 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4375 {
4376         struct btrfs_fs_info *fs_info;
4377         struct btrfs_space_info *space_info;
4378         u64 to_reclaim;
4379         int flush_state;
4380
4381         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4382         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4383
4384         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4385                                                       space_info);
4386         if (!to_reclaim)
4387                 return;
4388
4389         flush_state = FLUSH_DELAYED_ITEMS_NR;
4390         do {
4391                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4392                             to_reclaim, flush_state);
4393                 flush_state++;
4394                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4395                                                  flush_state))
4396                         return;
4397         } while (flush_state <= COMMIT_TRANS);
4398
4399         if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
4400                 queue_work(system_unbound_wq, work);
4401 }
4402
4403 void btrfs_init_async_reclaim_work(struct work_struct *work)
4404 {
4405         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4406 }
4407
4408 /**
4409  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4410  * @root - the root we're allocating for
4411  * @block_rsv - the block_rsv we're allocating for
4412  * @orig_bytes - the number of bytes we want
4413  * @flush - whether or not we can flush to make our reservation
4414  *
4415  * This will reserve orig_bytes number of bytes from the space info associated
4416  * with the block_rsv.  If there is not enough space it will make an attempt to
4417  * flush out space to make room.  It will do this by flushing delalloc if
4418  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4419  * then no attempts to regain reservations will be made and this will fail if
4420  * there is not enough space already.
4421  */
4422 static int reserve_metadata_bytes(struct btrfs_root *root,
4423                                   struct btrfs_block_rsv *block_rsv,
4424                                   u64 orig_bytes,
4425                                   enum btrfs_reserve_flush_enum flush)
4426 {
4427         struct btrfs_space_info *space_info = block_rsv->space_info;
4428         u64 used;
4429         u64 num_bytes = orig_bytes;
4430         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4431         int ret = 0;
4432         bool flushing = false;
4433
4434 again:
4435         ret = 0;
4436         spin_lock(&space_info->lock);
4437         /*
4438          * We only want to wait if somebody other than us is flushing and we
4439          * are actually allowed to flush all things.
4440          */
4441         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4442                space_info->flush) {
4443                 spin_unlock(&space_info->lock);
4444                 /*
4445                  * If we have a trans handle we can't wait because the flusher
4446                  * may have to commit the transaction, which would mean we would
4447                  * deadlock since we are waiting for the flusher to finish, but
4448                  * hold the current transaction open.
4449                  */
4450                 if (current->journal_info)
4451                         return -EAGAIN;
4452                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4453                 /* Must have been killed, return */
4454                 if (ret)
4455                         return -EINTR;
4456
4457                 spin_lock(&space_info->lock);
4458         }
4459
4460         ret = -ENOSPC;
4461         used = space_info->bytes_used + space_info->bytes_reserved +
4462                 space_info->bytes_pinned + space_info->bytes_readonly +
4463                 space_info->bytes_may_use;
4464
4465         /*
4466          * The idea here is that if we've not already over-reserved the space
4467          * then we can go ahead and save our reservation first and then start
4468          * flushing if we need to.  Otherwise, if we've already overcommitted,
4469          * let's start flushing stuff first and then come back and try to make
4470          * our reservation.
4471          */
4472         if (used <= space_info->total_bytes) {
4473                 if (used + orig_bytes <= space_info->total_bytes) {
4474                         space_info->bytes_may_use += orig_bytes;
4475                         trace_btrfs_space_reservation(root->fs_info,
4476                                 "space_info", space_info->flags, orig_bytes, 1);
4477                         ret = 0;
4478                 } else {
4479                         /*
4480                          * Ok, set num_bytes to orig_bytes since we aren't
4481                          * overcommitted; this way we only try to reclaim what
4482                          * we need.
4483                          */
4484                         num_bytes = orig_bytes;
4485                 }
4486         } else {
4487                 /*
4488                  * Ok, we're overcommitted, set num_bytes to the overcommitted
4489                  * amount plus the amount of bytes that we need for this
4490                  * reservation.
4491                  */
4492                 num_bytes = used - space_info->total_bytes +
4493                         (orig_bytes * 2);
4494         }
4495
4496         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4497                 space_info->bytes_may_use += orig_bytes;
4498                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4499                                               space_info->flags, orig_bytes,
4500                                               1);
4501                 ret = 0;
4502         }
4503
4504         /*
4505          * Couldn't make our reservation, save our place so while we're trying
4506          * to reclaim space we can actually use it instead of somebody else
4507          * stealing it from us.
4508          *
4509          * We make the other tasks wait for the flush only when we can flush
4510          * all things.
4511          */
4512         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4513                 flushing = true;
4514                 space_info->flush = 1;
4515         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4516                 used += orig_bytes;
4517                 /*
4518                  * We will do the space reservation dance during log replay,
4519                  * which means we won't have fs_info->fs_root set, so don't do
4520                  * the async reclaim as we will panic.
4521                  */
4522                 if (!root->fs_info->log_root_recovering &&
4523                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4524                     !work_busy(&root->fs_info->async_reclaim_work))
4525                         queue_work(system_unbound_wq,
4526                                    &root->fs_info->async_reclaim_work);
4527         }
4528         spin_unlock(&space_info->lock);
4529
4530         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4531                 goto out;
4532
4533         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4534                           flush_state);
4535         flush_state++;
4536
4537         /*
4538          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4539          * could happen.  So skip the delalloc flush states.
4540          */
4541         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4542             (flush_state == FLUSH_DELALLOC ||
4543              flush_state == FLUSH_DELALLOC_WAIT))
4544                 flush_state = ALLOC_CHUNK;
4545
4546         if (!ret)
4547                 goto again;
4548         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4549                  flush_state < COMMIT_TRANS)
4550                 goto again;
4551         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4552                  flush_state <= COMMIT_TRANS)
4553                 goto again;
4554
4555 out:
4556         if (ret == -ENOSPC &&
4557             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4558                 struct btrfs_block_rsv *global_rsv =
4559                         &root->fs_info->global_block_rsv;
4560
4561                 if (block_rsv != global_rsv &&
4562                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4563                         ret = 0;
4564         }
4565         if (ret == -ENOSPC)
4566                 trace_btrfs_space_reservation(root->fs_info,
4567                                               "space_info:enospc",
4568                                               space_info->flags, orig_bytes, 1);
4569         if (flushing) {
4570                 spin_lock(&space_info->lock);
4571                 space_info->flush = 0;
4572                 wake_up_all(&space_info->wait);
4573                 spin_unlock(&space_info->lock);
4574         }
4575         return ret;
4576 }
4577
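/*
 * Pick which block reserve to charge: the transaction's rsv for
 * reference-counted roots, for csum insertions and for the uuid root,
 * otherwise the root's own rsv, falling back to the empty rsv.
 */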
4578 static struct btrfs_block_rsv *get_block_rsv(
4579                                         const struct btrfs_trans_handle *trans,
4580                                         const struct btrfs_root *root)
4581 {
4582         struct btrfs_block_rsv *block_rsv = NULL;
4583
4584         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4585                 block_rsv = trans->block_rsv;
4586
4587         if (root == root->fs_info->csum_root && trans->adding_csums)
4588                 block_rsv = trans->block_rsv;
4589
4590         if (root == root->fs_info->uuid_root)
4591                 block_rsv = trans->block_rsv;
4592
4593         if (!block_rsv)
4594                 block_rsv = root->block_rsv;
4595
4596         if (!block_rsv)
4597                 block_rsv = &root->fs_info->empty_block_rsv;
4598
4599         return block_rsv;
4600 }
4601
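/* Take num_bytes out of the reserve, or fail with -ENOSPC if it is short. */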
4602 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4603                                u64 num_bytes)
4604 {
4605         int ret = -ENOSPC;
4606         spin_lock(&block_rsv->lock);
4607         if (block_rsv->reserved >= num_bytes) {
4608                 block_rsv->reserved -= num_bytes;
4609                 if (block_rsv->reserved < block_rsv->size)
4610                         block_rsv->full = 0;
4611                 ret = 0;
4612         }
4613         spin_unlock(&block_rsv->lock);
4614         return ret;
4615 }
4616
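/*
 * Return num_bytes to the reserve; if update_size is set, grow the
 * reserve's target size as well, otherwise mark it full once the reserved
 * amount reaches that size.
 */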
4617 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4618                                 u64 num_bytes, int update_size)
4619 {
4620         spin_lock(&block_rsv->lock);
4621         block_rsv->reserved += num_bytes;
4622         if (update_size)
4623                 block_rsv->size += num_bytes;
4624         else if (block_rsv->reserved >= block_rsv->size)
4625                 block_rsv->full = 1;
4626         spin_unlock(&block_rsv->lock);
4627 }
4628
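/*
 * Steal num_bytes from the global reserve for @dest, but only if doing so
 * still leaves the global reserve at least min_factor tenths full.
 */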
4629 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4630                              struct btrfs_block_rsv *dest, u64 num_bytes,
4631                              int min_factor)
4632 {
4633         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4634         u64 min_bytes;
4635
4636         if (global_rsv->space_info != dest->space_info)
4637                 return -ENOSPC;
4638
4639         spin_lock(&global_rsv->lock);
4640         min_bytes = div_factor(global_rsv->size, min_factor);
4641         if (global_rsv->reserved < min_bytes + num_bytes) {
4642                 spin_unlock(&global_rsv->lock);
4643                 return -ENOSPC;
4644         }
4645         global_rsv->reserved -= num_bytes;
4646         if (global_rsv->reserved < global_rsv->size)
4647                 global_rsv->full = 0;
4648         spin_unlock(&global_rsv->lock);
4649
4650         block_rsv_add_bytes(dest, num_bytes, 1);
4651         return 0;
4652 }
4653
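/*
 * Shrink block_rsv by num_bytes ((u64)-1 means the whole reserve).  Excess
 * reservation is moved into @dest while it has room, and whatever is left
 * over is given back to the space info's bytes_may_use.
 */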
4654 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4655                                     struct btrfs_block_rsv *block_rsv,
4656                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4657 {
4658         struct btrfs_space_info *space_info = block_rsv->space_info;
4659
4660         spin_lock(&block_rsv->lock);
4661         if (num_bytes == (u64)-1)
4662                 num_bytes = block_rsv->size;
4663         block_rsv->size -= num_bytes;
4664         if (block_rsv->reserved >= block_rsv->size) {
4665                 num_bytes = block_rsv->reserved - block_rsv->size;
4666                 block_rsv->reserved = block_rsv->size;
4667                 block_rsv->full = 1;
4668         } else {
4669                 num_bytes = 0;
4670         }
4671         spin_unlock(&block_rsv->lock);
4672
4673         if (num_bytes > 0) {
4674                 if (dest) {
4675                         spin_lock(&dest->lock);
4676                         if (!dest->full) {
4677                                 u64 bytes_to_add;
4678
4679                                 bytes_to_add = dest->size - dest->reserved;
4680                                 bytes_to_add = min(num_bytes, bytes_to_add);
4681                                 dest->reserved += bytes_to_add;
4682                                 if (dest->reserved >= dest->size)
4683                                         dest->full = 1;
4684                                 num_bytes -= bytes_to_add;
4685                         }
4686                         spin_unlock(&dest->lock);
4687                 }
4688                 if (num_bytes) {
4689                         spin_lock(&space_info->lock);
4690                         space_info->bytes_may_use -= num_bytes;
4691                         trace_btrfs_space_reservation(fs_info, "space_info",
4692                                         space_info->flags, num_bytes, 0);
4693                         spin_unlock(&space_info->lock);
4694                 }
4695         }
4696 }
4697
4698 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4699                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4700 {
4701         int ret;
4702
4703         ret = block_rsv_use_bytes(src, num_bytes);
4704         if (ret)
4705                 return ret;
4706
4707         block_rsv_add_bytes(dst, num_bytes, 1);
4708         return 0;
4709 }
4710
4711 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4712 {
4713         memset(rsv, 0, sizeof(*rsv));
4714         spin_lock_init(&rsv->lock);
4715         rsv->type = type;
4716 }
4717
4718 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4719                                               unsigned short type)
4720 {
4721         struct btrfs_block_rsv *block_rsv;
4722         struct btrfs_fs_info *fs_info = root->fs_info;
4723
4724         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4725         if (!block_rsv)
4726                 return NULL;
4727
4728         btrfs_init_block_rsv(block_rsv, type);
4729         block_rsv->space_info = __find_space_info(fs_info,
4730                                                   BTRFS_BLOCK_GROUP_METADATA);
4731         return block_rsv;
4732 }
4733
4734 void btrfs_free_block_rsv(struct btrfs_root *root,
4735                           struct btrfs_block_rsv *rsv)
4736 {
4737         if (!rsv)
4738                 return;
4739         btrfs_block_rsv_release(root, rsv, (u64)-1);
4740         kfree(rsv);
4741 }
4742
4743 int btrfs_block_rsv_add(struct btrfs_root *root,
4744                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4745                         enum btrfs_reserve_flush_enum flush)
4746 {
4747         int ret;
4748
4749         if (num_bytes == 0)
4750                 return 0;
4751
4752         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4753         if (!ret) {
4754                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4755                 return 0;
4756         }
4757
4758         return ret;
4759 }
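/*
 * A minimal sketch of the typical pattern around these helpers; the local
 * variable names and byte count are illustrative only:
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	...consume the reservation...
 *	btrfs_free_block_rsv(root, rsv);	(releases whatever is left)
 */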
4760
4761 int btrfs_block_rsv_check(struct btrfs_root *root,
4762                           struct btrfs_block_rsv *block_rsv, int min_factor)
4763 {
4764         u64 num_bytes = 0;
4765         int ret = -ENOSPC;
4766
4767         if (!block_rsv)
4768                 return 0;
4769
4770         spin_lock(&block_rsv->lock);
4771         num_bytes = div_factor(block_rsv->size, min_factor);
4772         if (block_rsv->reserved >= num_bytes)
4773                 ret = 0;
4774         spin_unlock(&block_rsv->lock);
4775
4776         return ret;
4777 }
4778
4779 int btrfs_block_rsv_refill(struct btrfs_root *root,
4780                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4781                            enum btrfs_reserve_flush_enum flush)
4782 {
4783         u64 num_bytes = 0;
4784         int ret = -ENOSPC;
4785
4786         if (!block_rsv)
4787                 return 0;
4788
4789         spin_lock(&block_rsv->lock);
4790         num_bytes = min_reserved;
4791         if (block_rsv->reserved >= num_bytes)
4792                 ret = 0;
4793         else
4794                 num_bytes -= block_rsv->reserved;
4795         spin_unlock(&block_rsv->lock);
4796
4797         if (!ret)
4798                 return 0;
4799
4800         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4801         if (!ret) {
4802                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4803                 return 0;
4804         }
4805
4806         return ret;
4807 }
4808
4809 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4810                             struct btrfs_block_rsv *dst_rsv,
4811                             u64 num_bytes)
4812 {
4813         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4814 }
4815
4816 void btrfs_block_rsv_release(struct btrfs_root *root,
4817                              struct btrfs_block_rsv *block_rsv,
4818                              u64 num_bytes)
4819 {
4820         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4821         if (global_rsv == block_rsv ||
4822             block_rsv->space_info != global_rsv->space_info)
4823                 global_rsv = NULL;
4824         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4825                                 num_bytes);
4826 }
4827
4828 /*
4829  * helper to calculate the size of the global block reservation.
4830  * the desired value is the sum of the space used by the extent tree,
4831  * checksum tree and root tree.
4832  */
4833 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4834 {
4835         struct btrfs_space_info *sinfo;
4836         u64 num_bytes;
4837         u64 meta_used;
4838         u64 data_used;
4839         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4840
4841         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4842         spin_lock(&sinfo->lock);
4843         data_used = sinfo->bytes_used;
4844         spin_unlock(&sinfo->lock);
4845
4846         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4847         spin_lock(&sinfo->lock);
4848         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4849                 data_used = 0;
4850         meta_used = sinfo->bytes_used;
4851         spin_unlock(&sinfo->lock);
4852
4853         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4854                     csum_size * 2;
4855         num_bytes += div64_u64(data_used + meta_used, 50);
4856
4857         if (num_bytes * 3 > meta_used)
4858                 num_bytes = div64_u64(meta_used, 3);
4859
4860         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4861 }
4862
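/*
 * Recalculate the global reserve's target size (capped at 512M) and top it
 * up from, or return its excess to, the space info's bytes_may_use.
 */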
4863 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4864 {
4865         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4866         struct btrfs_space_info *sinfo = block_rsv->space_info;
4867         u64 num_bytes;
4868
4869         num_bytes = calc_global_metadata_size(fs_info);
4870
4871         spin_lock(&sinfo->lock);
4872         spin_lock(&block_rsv->lock);
4873
4874         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4875
4876         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4877                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4878                     sinfo->bytes_may_use;
4879
4880         if (sinfo->total_bytes > num_bytes) {
4881                 num_bytes = sinfo->total_bytes - num_bytes;
4882                 block_rsv->reserved += num_bytes;
4883                 sinfo->bytes_may_use += num_bytes;
4884                 trace_btrfs_space_reservation(fs_info, "space_info",
4885                                       sinfo->flags, num_bytes, 1);
4886         }
4887
4888         if (block_rsv->reserved >= block_rsv->size) {
4889                 num_bytes = block_rsv->reserved - block_rsv->size;
4890                 sinfo->bytes_may_use -= num_bytes;
4891                 trace_btrfs_space_reservation(fs_info, "space_info",
4892                                       sinfo->flags, num_bytes, 0);
4893                 block_rsv->reserved = block_rsv->size;
4894                 block_rsv->full = 1;
4895         }
4896
4897         spin_unlock(&block_rsv->lock);
4898         spin_unlock(&sinfo->lock);
4899 }
4900
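/*
 * Point the metadata block reserves at the METADATA space info (the chunk
 * reserve at SYSTEM) and make the internal tree roots use the global rsv.
 */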
4901 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4902 {
4903         struct btrfs_space_info *space_info;
4904
4905         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4906         fs_info->chunk_block_rsv.space_info = space_info;
4907
4908         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4909         fs_info->global_block_rsv.space_info = space_info;
4910         fs_info->delalloc_block_rsv.space_info = space_info;
4911         fs_info->trans_block_rsv.space_info = space_info;
4912         fs_info->empty_block_rsv.space_info = space_info;
4913         fs_info->delayed_block_rsv.space_info = space_info;
4914
4915         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4916         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4917         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4918         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4919         if (fs_info->quota_root)
4920                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4921         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4922
4923         update_global_block_rsv(fs_info);
4924 }
4925
4926 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4927 {
4928         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4929                                 (u64)-1);
4930         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4931         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4932         WARN_ON(fs_info->trans_block_rsv.size > 0);
4933         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4934         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4935         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4936         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4937         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4938 }
4939
4940 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4941                                   struct btrfs_root *root)
4942 {
4943         if (!trans->block_rsv)
4944                 return;
4945
4946         if (!trans->bytes_reserved)
4947                 return;
4948
4949         trace_btrfs_space_reservation(root->fs_info, "transaction",
4950                                       trans->transid, trans->bytes_reserved, 0);
4951         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4952         trans->bytes_reserved = 0;
4953 }
4954
4955 /* Can only return 0 or -ENOSPC */
4956 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4957                                   struct inode *inode)
4958 {
4959         struct btrfs_root *root = BTRFS_I(inode)->root;
4960         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4961         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4962
4963         /*
4964          * We need to hold space in order to delete our orphan item once we've
4965          * added it, so this takes the reservation so we can release it later
4966          * when we are truly done with the orphan item.
4967          */
4968         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4969         trace_btrfs_space_reservation(root->fs_info, "orphan",
4970                                       btrfs_ino(inode), num_bytes, 1);
4971         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4972 }
4973
4974 void btrfs_orphan_release_metadata(struct inode *inode)
4975 {
4976         struct btrfs_root *root = BTRFS_I(inode)->root;
4977         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4978         trace_btrfs_space_reservation(root->fs_info, "orphan",
4979                                       btrfs_ino(inode), num_bytes, 0);
4980         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4981 }
4982
4983 /*
4984  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4985  * root: the root of the parent directory
4986  * rsv: block reservation
4987  * items: the number of items that we need to reserve space for
4988  * qgroup_reserved: used to return the reserved size in qgroup
4989  *
4990  * This function is used to reserve the space for snapshot/subvolume
4991  * creation and deletion. Those operations are different from the
4992  * common file/directory operations: they change two fs/file trees
4993  * and the root tree, and the number of items that the qgroup reserves
4994  * is different from the free space reservation. So we can not use
4995  * the space reservation mechanism in start_transaction().
4996  */
4997 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4998                                      struct btrfs_block_rsv *rsv,
4999                                      int items,
5000                                      u64 *qgroup_reserved,
5001                                      bool use_global_rsv)
5002 {
5003         u64 num_bytes;
5004         int ret;
5005         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5006
5007         if (root->fs_info->quota_enabled) {
5008                 /* One for parent inode, two for dir entries */
5009                 num_bytes = 3 * root->nodesize;
5010                 ret = btrfs_qgroup_reserve(root, num_bytes);
5011                 if (ret)
5012                         return ret;
5013         } else {
5014                 num_bytes = 0;
5015         }
5016
5017         *qgroup_reserved = num_bytes;
5018
5019         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5020         rsv->space_info = __find_space_info(root->fs_info,
5021                                             BTRFS_BLOCK_GROUP_METADATA);
5022         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5023                                   BTRFS_RESERVE_FLUSH_ALL);
5024
5025         if (ret == -ENOSPC && use_global_rsv)
5026                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5027
5028         if (ret) {
5029                 if (*qgroup_reserved)
5030                         btrfs_qgroup_free(root, *qgroup_reserved);
5031         }
5032
5033         return ret;
5034 }
5035
5036 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5037                                       struct btrfs_block_rsv *rsv,
5038                                       u64 qgroup_reserved)
5039 {
5040         btrfs_block_rsv_release(root, rsv, (u64)-1);
5041         if (qgroup_reserved)
5042                 btrfs_qgroup_free(root, qgroup_reserved);
5043 }
5044
5045 /**
5046  * drop_outstanding_extent - drop an outstanding extent
5047  * @inode: the inode we're dropping the extent for
5048  *
5049  * This is called when we are freeing up an outstanding extent, either called
5050  * after an error or after an extent is written.  This will return the number of
5051  * reserved extents that need to be freed.  This must be called with
5052  * BTRFS_I(inode)->lock held.
5053  */
5054 static unsigned drop_outstanding_extent(struct inode *inode)
5055 {
5056         unsigned drop_inode_space = 0;
5057         unsigned dropped_extents = 0;
5058
5059         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
5060         BTRFS_I(inode)->outstanding_extents--;
5061
5062         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5063             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5064                                &BTRFS_I(inode)->runtime_flags))
5065                 drop_inode_space = 1;
5066
5067         /*
5068          * If we have the same number of outstanding extents as we have
5069          * reserved, or more, we need to leave the reserved extents count alone.
5070          */
5071         if (BTRFS_I(inode)->outstanding_extents >=
5072             BTRFS_I(inode)->reserved_extents)
5073                 return drop_inode_space;
5074
5075         dropped_extents = BTRFS_I(inode)->reserved_extents -
5076                 BTRFS_I(inode)->outstanding_extents;
5077         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5078         return dropped_extents + drop_inode_space;
5079 }
5080
5081 /**
5082  * calc_csum_metadata_size - return the amount of metadata space that must be
5083  *      reserved/freed for the given bytes.
5084  * @inode: the inode we're manipulating
5085  * @num_bytes: the number of bytes in question
5086  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5087  *
5088  * This adjusts the number of csum_bytes in the inode and then returns the
5089  * correct amount of metadata that must either be reserved or freed.  We
5090  * calculate how many checksums we can fit into one leaf and then divide the
5091  * number of bytes that will need to be checksummed by this value to figure out
5092  * how many checksums will be required.  If we are adding bytes then the number
5093  * may go up and we will return the number of additional bytes that must be
5094  * reserved.  If it is going down we will return the number of bytes that must
5095  * be freed.
5096  *
5097  * This must be called with BTRFS_I(inode)->lock held.
5098  */
5099 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5100                                    int reserve)
5101 {
5102         struct btrfs_root *root = BTRFS_I(inode)->root;
5103         u64 csum_size;
5104         int num_csums_per_leaf;
5105         int num_csums;
5106         int old_csums;
5107
5108         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5109             BTRFS_I(inode)->csum_bytes == 0)
5110                 return 0;
5111
5112         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5113         if (reserve)
5114                 BTRFS_I(inode)->csum_bytes += num_bytes;
5115         else
5116                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5117         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
5118         num_csums_per_leaf = (int)div64_u64(csum_size,
5119                                             sizeof(struct btrfs_csum_item) +
5120                                             sizeof(struct btrfs_disk_key));
5121         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5122         num_csums = num_csums + num_csums_per_leaf - 1;
5123         num_csums = num_csums / num_csums_per_leaf;
5124
5125         old_csums = old_csums + num_csums_per_leaf - 1;
5126         old_csums = old_csums / num_csums_per_leaf;
5127
5128         /* No change, no need to reserve more */
5129         if (old_csums == num_csums)
5130                 return 0;
5131
5132         if (reserve)
5133                 return btrfs_calc_trans_metadata_size(root,
5134                                                       num_csums - old_csums);
5135
5136         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5137 }
5138
5139 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5140 {
5141         struct btrfs_root *root = BTRFS_I(inode)->root;
5142         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5143         u64 to_reserve = 0;
5144         u64 csum_bytes;
5145         unsigned nr_extents = 0;
5146         int extra_reserve = 0;
5147         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5148         int ret = 0;
5149         bool delalloc_lock = true;
5150         u64 to_free = 0;
5151         unsigned dropped;
5152
5153         /* If we are a free space inode we must not flush, since we will be in
5154          * the middle of a transaction commit.  We also don't need the delalloc
5155          * mutex since we won't race with anybody.  We need this mostly to make
5156          * lockdep shut its filthy mouth.
5157          */
5158         if (btrfs_is_free_space_inode(inode)) {
5159                 flush = BTRFS_RESERVE_NO_FLUSH;
5160                 delalloc_lock = false;
5161         }
5162
5163         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5164             btrfs_transaction_in_commit(root->fs_info))
5165                 schedule_timeout(1);
5166
5167         if (delalloc_lock)
5168                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5169
5170         num_bytes = ALIGN(num_bytes, root->sectorsize);
5171
5172         spin_lock(&BTRFS_I(inode)->lock);
5173         BTRFS_I(inode)->outstanding_extents++;
5174
5175         if (BTRFS_I(inode)->outstanding_extents >
5176             BTRFS_I(inode)->reserved_extents)
5177                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5178                         BTRFS_I(inode)->reserved_extents;
5179
5180         /*
5181          * Add an item to reserve for updating the inode when we complete the
5182          * delalloc io.
5183          */
5184         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5185                       &BTRFS_I(inode)->runtime_flags)) {
5186                 nr_extents++;
5187                 extra_reserve = 1;
5188         }
5189
5190         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5191         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5192         csum_bytes = BTRFS_I(inode)->csum_bytes;
5193         spin_unlock(&BTRFS_I(inode)->lock);
5194
5195         if (root->fs_info->quota_enabled) {
5196                 ret = btrfs_qgroup_reserve(root, num_bytes +
5197                                            nr_extents * root->nodesize);
5198                 if (ret)
5199                         goto out_fail;
5200         }
5201
5202         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5203         if (unlikely(ret)) {
5204                 if (root->fs_info->quota_enabled)
5205                         btrfs_qgroup_free(root, num_bytes +
5206                                                 nr_extents * root->nodesize);
5207                 goto out_fail;
5208         }
5209
5210         spin_lock(&BTRFS_I(inode)->lock);
5211         if (extra_reserve) {
5212                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5213                         &BTRFS_I(inode)->runtime_flags);
5214                 nr_extents--;
5215         }
5216         BTRFS_I(inode)->reserved_extents += nr_extents;
5217         spin_unlock(&BTRFS_I(inode)->lock);
5218
5219         if (delalloc_lock)
5220                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5221
5222         if (to_reserve)
5223                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5224                                               btrfs_ino(inode), to_reserve, 1);
5225         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5226
5227         return 0;
5228
5229 out_fail:
5230         spin_lock(&BTRFS_I(inode)->lock);
5231         dropped = drop_outstanding_extent(inode);
5232         /*
5233          * If the inode's csum_bytes is the same as the original
5234          * csum_bytes then we know we haven't raced with any free()ers,
5235          * so we can just reduce our inode's csum bytes and carry on.
5236          */
5237         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5238                 calc_csum_metadata_size(inode, num_bytes, 0);
5239         } else {
5240                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5241                 u64 bytes;
5242
5243                 /*
5244                  * This is tricky, but first we need to figure out how much we
5245                  * freed from any free-ers that occurred during this
5246                  * reservation, so we reset ->csum_bytes to the csum_bytes
5247                  * before we dropped our lock, and then call the free for the
5248                  * number of bytes that were freed while we were trying our
5249                  * reservation.
5250                  */
5251                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5252                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5253                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5254
5256                 /*
5257                  * Now we need to see how much we would have freed had we not
5258                  * been making this reservation and our ->csum_bytes were not
5259                  * artificially inflated.
5260                  */
5261                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5262                 bytes = csum_bytes - orig_csum_bytes;
5263                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5264
5265                 /*
5266                  * Now reset ->csum_bytes to what it should be.  If bytes is
5267                  * more than to_free then we would have freed more space had we
5268                  * not had an artificially high ->csum_bytes, so we need to free
5269                  * the remainder.  If bytes is the same or less then we don't
5270                  * need to do anything; the other free-ers did the correct
5271                  * thing.
5272                  */
5273                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5274                 if (bytes > to_free)
5275                         to_free = bytes - to_free;
5276                 else
5277                         to_free = 0;
5278         }
5279         spin_unlock(&BTRFS_I(inode)->lock);
5280         if (dropped)
5281                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5282
5283         if (to_free) {
5284                 btrfs_block_rsv_release(root, block_rsv, to_free);
5285                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5286                                               btrfs_ino(inode), to_free, 0);
5287         }
5288         if (delalloc_lock)
5289                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5290         return ret;
5291 }
5292
5293 /**
5294  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5295  * @inode: the inode to release the reservation for
5296  * @num_bytes: the number of bytes we're releasing
5297  *
5298  * This will release the metadata reservation for an inode.  This can be called
5299  * once we complete IO for a given set of bytes to release their metadata
5300  * reservations.
5301  */
5302 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5303 {
5304         struct btrfs_root *root = BTRFS_I(inode)->root;
5305         u64 to_free = 0;
5306         unsigned dropped;
5307
5308         num_bytes = ALIGN(num_bytes, root->sectorsize);
5309         spin_lock(&BTRFS_I(inode)->lock);
5310         dropped = drop_outstanding_extent(inode);
5311
5312         if (num_bytes)
5313                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5314         spin_unlock(&BTRFS_I(inode)->lock);
5315         if (dropped > 0)
5316                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5317
5318         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5319                                       btrfs_ino(inode), to_free, 0);
5320         if (root->fs_info->quota_enabled) {
5321                 btrfs_qgroup_free(root, num_bytes +
5322                                         dropped * root->nodesize);
5323         }
5324
5325         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5326                                 to_free);
5327 }
5328
5329 /**
5330  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5331  * @inode: inode we're writing to
5332  * @num_bytes: the number of bytes we want to allocate
5333  *
5334  * This will do the following things
5335  *
5336  * o reserve space in the data space info for num_bytes
5337  * o reserve space in the metadata space info based on number of outstanding
5338  *   extents and how much csums will be needed
5339  * o add to the inode's ->delalloc_bytes
5340  * o add it to the fs_info's delalloc inodes list.
5341  *
5342  * This will return 0 for success and -ENOSPC if there is no space left.
5343  */
5344 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5345 {
5346         int ret;
5347
5348         ret = btrfs_check_data_free_space(inode, num_bytes);
5349         if (ret)
5350                 return ret;
5351
5352         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5353         if (ret) {
5354                 btrfs_free_reserved_data_space(inode, num_bytes);
5355                 return ret;
5356         }
5357
5358         return 0;
5359 }
5360
5361 /**
5362  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5363  * @inode: inode we're releasing space for
5364  * @num_bytes: the number of bytes we want to free up
5365  *
5366  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5367  * called in the case that we don't need the metadata AND data reservations
5368  * anymore, e.g. if there is an error or we insert an inline extent.
5369  *
5370  * This function will release the metadata space that was not used and will
5371  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5372  * list if there are no delalloc bytes left.
5373  */
5374 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5375 {
5376         btrfs_delalloc_release_metadata(inode, num_bytes);
5377         btrfs_free_reserved_data_space(inode, num_bytes);
5378 }
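/*
 * A minimal sketch of how the delalloc reserve/release pair is meant to be
 * used (the write helper named here is hypothetical; on success the
 * reservation is consumed by the ordinary writeback path):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = copy_pages_and_dirty(inode, buf, num_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 */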
5379
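/*
 * Adjust used-bytes accounting for the block groups covering
 * [bytenr, bytenr + num_bytes): the superblock total, the block group
 * items and the space info counters.  On frees the range is also pinned,
 * and a block group that drops to zero used bytes is queued for removal.
 */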
5380 static int update_block_group(struct btrfs_root *root,
5381                               u64 bytenr, u64 num_bytes, int alloc)
5382 {
5383         struct btrfs_block_group_cache *cache = NULL;
5384         struct btrfs_fs_info *info = root->fs_info;
5385         u64 total = num_bytes;
5386         u64 old_val;
5387         u64 byte_in_group;
5388         int factor;
5389
5390         /* block accounting for super block */
5391         spin_lock(&info->delalloc_root_lock);
5392         old_val = btrfs_super_bytes_used(info->super_copy);
5393         if (alloc)
5394                 old_val += num_bytes;
5395         else
5396                 old_val -= num_bytes;
5397         btrfs_set_super_bytes_used(info->super_copy, old_val);
5398         spin_unlock(&info->delalloc_root_lock);
5399
5400         while (total) {
5401                 cache = btrfs_lookup_block_group(info, bytenr);
5402                 if (!cache)
5403                         return -ENOENT;
5404                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5405                                     BTRFS_BLOCK_GROUP_RAID1 |
5406                                     BTRFS_BLOCK_GROUP_RAID10))
5407                         factor = 2;
5408                 else
5409                         factor = 1;
5410                 /*
5411                  * If this block group has free space cache written out, we
5412                  * need to make sure to load it if we are removing space.  This
5413                  * is because we need the unpinning stage to actually add the
5414                  * space back to the block group, otherwise we will leak space.
5415                  */
5416                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5417                         cache_block_group(cache, 1);
5418
5419                 byte_in_group = bytenr - cache->key.objectid;
5420                 WARN_ON(byte_in_group > cache->key.offset);
5421
5422                 spin_lock(&cache->space_info->lock);
5423                 spin_lock(&cache->lock);
5424
5425                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5426                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5427                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5428
5429                 cache->dirty = 1;
5430                 old_val = btrfs_block_group_used(&cache->item);
5431                 num_bytes = min(total, cache->key.offset - byte_in_group);
5432                 if (alloc) {
5433                         old_val += num_bytes;
5434                         btrfs_set_block_group_used(&cache->item, old_val);
5435                         cache->reserved -= num_bytes;
5436                         cache->space_info->bytes_reserved -= num_bytes;
5437                         cache->space_info->bytes_used += num_bytes;
5438                         cache->space_info->disk_used += num_bytes * factor;
5439                         spin_unlock(&cache->lock);
5440                         spin_unlock(&cache->space_info->lock);
5441                 } else {
5442                         old_val -= num_bytes;
5443                         btrfs_set_block_group_used(&cache->item, old_val);
5444                         cache->pinned += num_bytes;
5445                         cache->space_info->bytes_pinned += num_bytes;
5446                         cache->space_info->bytes_used -= num_bytes;
5447                         cache->space_info->disk_used -= num_bytes * factor;
5448                         spin_unlock(&cache->lock);
5449                         spin_unlock(&cache->space_info->lock);
5450
5451                         set_extent_dirty(info->pinned_extents,
5452                                          bytenr, bytenr + num_bytes - 1,
5453                                          GFP_NOFS | __GFP_NOFAIL);
5454                         /*
5455                          * No longer any used bytes in this block group, so
5456                          * queue it for deletion.
5457                          */
5458                         if (old_val == 0) {
5459                                 spin_lock(&info->unused_bgs_lock);
5460                                 if (list_empty(&cache->bg_list)) {
5461                                         btrfs_get_block_group(cache);
5462                                         list_add_tail(&cache->bg_list,
5463                                                       &info->unused_bgs);
5464                                 }
5465                                 spin_unlock(&info->unused_bgs_lock);
5466                         }
5467                 }
5468                 btrfs_put_block_group(cache);
5469                 total -= num_bytes;
5470                 bytenr += num_bytes;
5471         }
5472         return 0;
5473 }
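
/*
 * Worked example (illustrative only): freeing 4K from a RAID1 block
 * group takes the !alloc branch above with factor == 2, so bytes_used
 * drops by 4K while disk_used drops by 8K, matching the two physical
 * copies kept on disk.
 */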
5474
5475 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5476 {
5477         struct btrfs_block_group_cache *cache;
5478         u64 bytenr;
5479
5480         spin_lock(&root->fs_info->block_group_cache_lock);
5481         bytenr = root->fs_info->first_logical_byte;
5482         spin_unlock(&root->fs_info->block_group_cache_lock);
5483
5484         if (bytenr < (u64)-1)
5485                 return bytenr;
5486
5487         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5488         if (!cache)
5489                 return 0;
5490
5491         bytenr = cache->key.objectid;
5492         btrfs_put_block_group(cache);
5493
5494         return bytenr;
5495 }
5496
5497 static int pin_down_extent(struct btrfs_root *root,
5498                            struct btrfs_block_group_cache *cache,
5499                            u64 bytenr, u64 num_bytes, int reserved)
5500 {
5501         spin_lock(&cache->space_info->lock);
5502         spin_lock(&cache->lock);
5503         cache->pinned += num_bytes;
5504         cache->space_info->bytes_pinned += num_bytes;
5505         if (reserved) {
5506                 cache->reserved -= num_bytes;
5507                 cache->space_info->bytes_reserved -= num_bytes;
5508         }
5509         spin_unlock(&cache->lock);
5510         spin_unlock(&cache->space_info->lock);
5511
5512         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5513                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5514         if (reserved)
5515                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5516         return 0;
5517 }
5518
5519 /*
5520  * this function must be called within a transaction
5521  */
5522 int btrfs_pin_extent(struct btrfs_root *root,
5523                      u64 bytenr, u64 num_bytes, int reserved)
5524 {
5525         struct btrfs_block_group_cache *cache;
5526
5527         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5528         BUG_ON(!cache); /* Logic error */
5529
5530         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5531
5532         btrfs_put_block_group(cache);
5533         return 0;
5534 }
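
/*
 * Illustrative sketch (hypothetical values): a caller inside a running
 * transaction that needs to pin an extent it has already reserved
 * would do:
 *
 *	btrfs_pin_extent(root, bytenr, num_bytes, 1);
 *
 * Passing reserved == 1 makes pin_down_extent() move the bytes from
 * the reserved counters over to the pinned counters in one step.
 */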
5535
5536 /*
5537  * this function must be called within a transaction
5538  */
5539 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5540                                     u64 bytenr, u64 num_bytes)
5541 {
5542         struct btrfs_block_group_cache *cache;
5543         int ret;
5544
5545         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5546         if (!cache)
5547                 return -EINVAL;
5548
5549         /*
5550          * pull in the free space cache (if any) so that our pin
5551          * removes the free space from the cache.  We have load_only set
5552          * to one because the slow code to read in the free extents does check
5553          * the pinned extents.
5554          */
5555         cache_block_group(cache, 1);
5556
5557         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5558
5559         /* remove us from the free space cache (if we're there at all) */
5560         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5561         btrfs_put_block_group(cache);
5562         return ret;
5563 }
5564
5565 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5566 {
5567         int ret;
5568         struct btrfs_block_group_cache *block_group;
5569         struct btrfs_caching_control *caching_ctl;
5570
5571         block_group = btrfs_lookup_block_group(root->fs_info, start);
5572         if (!block_group)
5573                 return -EINVAL;
5574
5575         cache_block_group(block_group, 0);
5576         caching_ctl = get_caching_control(block_group);
5577
5578         if (!caching_ctl) {
5579                 /* Logic error */
5580                 BUG_ON(!block_group_cache_done(block_group));
5581                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5582         } else {
5583                 mutex_lock(&caching_ctl->mutex);
5584
5585                 if (start >= caching_ctl->progress) {
5586                         ret = add_excluded_extent(root, start, num_bytes);
5587                 } else if (start + num_bytes <= caching_ctl->progress) {
5588                         ret = btrfs_remove_free_space(block_group,
5589                                                       start, num_bytes);
5590                 } else {
5591                         num_bytes = caching_ctl->progress - start;
5592                         ret = btrfs_remove_free_space(block_group,
5593                                                       start, num_bytes);
5594                         if (ret)
5595                                 goto out_lock;
5596
5597                         num_bytes = (start + num_bytes) -
5598                                 caching_ctl->progress;
5599                         start = caching_ctl->progress;
5600                         ret = add_excluded_extent(root, start, num_bytes);
5601                 }
5602 out_lock:
5603                 mutex_unlock(&caching_ctl->mutex);
5604                 put_caching_control(caching_ctl);
5605         }
5606         btrfs_put_block_group(block_group);
5607         return ret;
5608 }
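
/*
 * Worked example for the caching window above (illustrative only),
 * writing end for start + num_bytes and P for caching_ctl->progress:
 *
 *   start >= P:   the whole range is added as an excluded extent;
 *   end <= P:     the whole range is removed from the free space
 *                 cache, since the caching thread has already seen it;
 *   straddling P: [start, P) is removed from the cache and [P, end)
 *                 is excluded.
 */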
5609
5610 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5611                                  struct extent_buffer *eb)
5612 {
5613         struct btrfs_file_extent_item *item;
5614         struct btrfs_key key;
5615         int found_type;
5616         int i;
5617
5618         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5619                 return 0;
5620
5621         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5622                 btrfs_item_key_to_cpu(eb, &key, i);
5623                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5624                         continue;
5625                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5626                 found_type = btrfs_file_extent_type(eb, item);
5627                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5628                         continue;
5629                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5630                         continue;
5631                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5632                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5633                 __exclude_logged_extent(log, key.objectid, key.offset);
5634         }
5635
5636         return 0;
5637 }
5638
5639 /**
5640  * btrfs_update_reserved_bytes - update the block_group and space info counters
5641  * @cache:      The cache we are manipulating
5642  * @num_bytes:  The number of bytes in question
5643  * @reserve:    One of the reservation enums
5644  * @delalloc:   Whether the blocks are allocated for a delalloc write
5645  *
5646  * This is called by the allocator when it reserves space, or by somebody who is
5647  * freeing space that was never actually used on disk.  For example if you
5648  * reserve some space for a new leaf in transaction A and before transaction A
5649  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
5650  * in order to clear the reservation.
5651  *
5652  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5653  * ENOSPC accounting.  For data we handle the reservation through clearing the
5654  * delalloc bits in the io_tree.  We have to do this since we could end up
5655  * allocating less disk space for the amount of data we have reserved in the
5656  * case of compression.
5657  *
5658  * If this is a reservation and the block group has become read only we cannot
5659  * make the reservation and return -EAGAIN, otherwise this function always
5660  * succeeds.
5661  */
5662 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5663                                        u64 num_bytes, int reserve, int delalloc)
5664 {
5665         struct btrfs_space_info *space_info = cache->space_info;
5666         int ret = 0;
5667
5668         spin_lock(&space_info->lock);
5669         spin_lock(&cache->lock);
5670         if (reserve != RESERVE_FREE) {
5671                 if (cache->ro) {
5672                         ret = -EAGAIN;
5673                 } else {
5674                         cache->reserved += num_bytes;
5675                         space_info->bytes_reserved += num_bytes;
5676                         if (reserve == RESERVE_ALLOC) {
5677                                 trace_btrfs_space_reservation(cache->fs_info,
5678                                                 "space_info", space_info->flags,
5679                                                 num_bytes, 0);
5680                                 space_info->bytes_may_use -= num_bytes;
5681                         }
5682
5683                         if (delalloc)
5684                                 cache->delalloc_bytes += num_bytes;
5685                 }
5686         } else {
5687                 if (cache->ro)
5688                         space_info->bytes_readonly += num_bytes;
5689                 cache->reserved -= num_bytes;
5690                 space_info->bytes_reserved -= num_bytes;
5691
5692                 if (delalloc)
5693                         cache->delalloc_bytes -= num_bytes;
5694         }
5695         spin_unlock(&cache->lock);
5696         spin_unlock(&space_info->lock);
5697         return ret;
5698 }
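
/*
 * Illustrative sketch (hypothetical caller): a metadata reservation
 * made and then abandoned within the same transaction pairs up as
 * described in the comment above:
 *
 *	ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC, 0);
 *	if (ret == -EAGAIN) {
 *		// the block group went read only, pick another one
 *	}
 *	...
 *	// the new leaf was freed before the transaction committed
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, 0);
 */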
5699
5700 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5701                                 struct btrfs_root *root)
5702 {
5703         struct btrfs_fs_info *fs_info = root->fs_info;
5704         struct btrfs_caching_control *next;
5705         struct btrfs_caching_control *caching_ctl;
5706         struct btrfs_block_group_cache *cache;
5707
5708         down_write(&fs_info->commit_root_sem);
5709
5710         list_for_each_entry_safe(caching_ctl, next,
5711                                  &fs_info->caching_block_groups, list) {
5712                 cache = caching_ctl->block_group;
5713                 if (block_group_cache_done(cache)) {
5714                         cache->last_byte_to_unpin = (u64)-1;
5715                         list_del_init(&caching_ctl->list);
5716                         put_caching_control(caching_ctl);
5717                 } else {
5718                         cache->last_byte_to_unpin = caching_ctl->progress;
5719                 }
5720         }
5721
5722         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5723                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5724         else
5725                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5726
5727         up_write(&fs_info->commit_root_sem);
5728
5729         update_global_block_rsv(fs_info);
5730 }
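
/*
 * Illustrative note: pinned_extents simply alternates between
 * freed_extents[0] and freed_extents[1] on every commit, so
 * btrfs_finish_extent_commit() below always unpins the tree that
 * collected frees during the transaction now committing, while new
 * frees land in the other tree.
 */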
5731
5732 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5733 {
5734         struct btrfs_fs_info *fs_info = root->fs_info;
5735         struct btrfs_block_group_cache *cache = NULL;
5736         struct btrfs_space_info *space_info;
5737         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5738         u64 len;
5739         bool readonly;
5740
5741         while (start <= end) {
5742                 readonly = false;
5743                 if (!cache ||
5744                     start >= cache->key.objectid + cache->key.offset) {
5745                         if (cache)
5746                                 btrfs_put_block_group(cache);
5747                         cache = btrfs_lookup_block_group(fs_info, start);
5748                         BUG_ON(!cache); /* Logic error */
5749                 }
5750
5751                 len = cache->key.objectid + cache->key.offset - start;
5752                 len = min(len, end + 1 - start);
5753
5754                 if (start < cache->last_byte_to_unpin) {
5755                         len = min(len, cache->last_byte_to_unpin - start);
5756                         btrfs_add_free_space(cache, start, len);
5757                 }
5758
5759                 start += len;
5760                 space_info = cache->space_info;
5761
5762                 spin_lock(&space_info->lock);
5763                 spin_lock(&cache->lock);
5764                 cache->pinned -= len;
5765                 space_info->bytes_pinned -= len;
5766                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
5767                 if (cache->ro) {
5768                         space_info->bytes_readonly += len;
5769                         readonly = true;
5770                 }
5771                 spin_unlock(&cache->lock);
5772                 if (!readonly && global_rsv->space_info == space_info) {
5773                         spin_lock(&global_rsv->lock);
5774                         if (!global_rsv->full) {
5775                                 len = min(len, global_rsv->size -
5776                                           global_rsv->reserved);
5777                                 global_rsv->reserved += len;
5778                                 space_info->bytes_may_use += len;
5779                                 if (global_rsv->reserved >= global_rsv->size)
5780                                         global_rsv->full = 1;
5781                         }
5782                         spin_unlock(&global_rsv->lock);
5783                 }
5784                 spin_unlock(&space_info->lock);
5785         }
5786
5787         if (cache)
5788                 btrfs_put_block_group(cache);
5789         return 0;
5790 }
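
/*
 * Illustrative note on the loop above: unpinned space is first handed
 * back to the free space cache (only up to last_byte_to_unpin), and
 * for writable block groups part of it refills the global block
 * reserve before the rest becomes generally available.
 */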
5791
5792 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5793                                struct btrfs_root *root)
5794 {
5795         struct btrfs_fs_info *fs_info = root->fs_info;
5796         struct extent_io_tree *unpin;
5797         u64 start;
5798         u64 end;
5799         int ret;
5800
5801         if (trans->aborted)
5802                 return 0;
5803
5804         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5805                 unpin = &fs_info->freed_extents[1];
5806         else
5807                 unpin = &fs_info->freed_extents[0];
5808
5809         while (1) {
5810                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5811                                             EXTENT_DIRTY, NULL);
5812                 if (ret)
5813                         break;
5814
5815                 if (btrfs_test_opt(root, DISCARD))
5816                         ret = btrfs_discard_extent(root, start,
5817                                                    end + 1 - start, NULL);
5818
5819                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5820                 unpin_extent_range(root, start, end);
5821                 cond_resched();
5822         }
5823
5824         return 0;
5825 }
5826
5827 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5828                              u64 owner, u64 root_objectid)
5829 {
5830         struct btrfs_space_info *space_info;
5831         u64 flags;
5832
5833         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5834                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5835                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5836                 else
5837                         flags = BTRFS_BLOCK_GROUP_METADATA;
5838         } else {
5839                 flags = BTRFS_BLOCK_GROUP_DATA;
5840         }
5841
5842         space_info = __find_space_info(fs_info, flags);
5843         BUG_ON(!space_info); /* Logic bug */
5844         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5845 }
5846
5847
5848 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5849                                 struct btrfs_root *root,
5850                                 u64 bytenr, u64 num_bytes, u64 parent,
5851                                 u64 root_objectid, u64 owner_objectid,
5852                                 u64 owner_offset, int refs_to_drop,
5853                                 struct btrfs_delayed_extent_op *extent_op,
5854                                 int no_quota)
5855 {
5856         struct btrfs_key key;
5857         struct btrfs_path *path;
5858         struct btrfs_fs_info *info = root->fs_info;
5859         struct btrfs_root *extent_root = info->extent_root;
5860         struct extent_buffer *leaf;
5861         struct btrfs_extent_item *ei;
5862         struct btrfs_extent_inline_ref *iref;
5863         int ret;
5864         int is_data;
5865         int extent_slot = 0;
5866         int found_extent = 0;
5867         int num_to_del = 1;
5868         u32 item_size;
5869         u64 refs;
5870         int last_ref = 0;
5871         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5872         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5873                                                  SKINNY_METADATA);
5874
5875         if (!info->quota_enabled || !is_fstree(root_objectid))
5876                 no_quota = 1;
5877
5878         path = btrfs_alloc_path();
5879         if (!path)
5880                 return -ENOMEM;
5881
5882         path->reada = 1;
5883         path->leave_spinning = 1;
5884
5885         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5886         BUG_ON(!is_data && refs_to_drop != 1);
5887
5888         if (is_data)
5889                 skinny_metadata = 0;
5890
5891         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5892                                     bytenr, num_bytes, parent,
5893                                     root_objectid, owner_objectid,
5894                                     owner_offset);
5895         if (ret == 0) {
5896                 extent_slot = path->slots[0];
5897                 while (extent_slot >= 0) {
5898                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5899                                               extent_slot);
5900                         if (key.objectid != bytenr)
5901                                 break;
5902                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5903                             key.offset == num_bytes) {
5904                                 found_extent = 1;
5905                                 break;
5906                         }
5907                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5908                             key.offset == owner_objectid) {
5909                                 found_extent = 1;
5910                                 break;
5911                         }
5912                         if (path->slots[0] - extent_slot > 5)
5913                                 break;
5914                         extent_slot--;
5915                 }
5916 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5917                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5918                 if (found_extent && item_size < sizeof(*ei))
5919                         found_extent = 0;
5920 #endif
5921                 if (!found_extent) {
5922                         BUG_ON(iref);
5923                         ret = remove_extent_backref(trans, extent_root, path,
5924                                                     NULL, refs_to_drop,
5925                                                     is_data, &last_ref);
5926                         if (ret) {
5927                                 btrfs_abort_transaction(trans, extent_root, ret);
5928                                 goto out;
5929                         }
5930                         btrfs_release_path(path);
5931                         path->leave_spinning = 1;
5932
5933                         key.objectid = bytenr;
5934                         key.type = BTRFS_EXTENT_ITEM_KEY;
5935                         key.offset = num_bytes;
5936
5937                         if (!is_data && skinny_metadata) {
5938                                 key.type = BTRFS_METADATA_ITEM_KEY;
5939                                 key.offset = owner_objectid;
5940                         }
5941
5942                         ret = btrfs_search_slot(trans, extent_root,
5943                                                 &key, path, -1, 1);
5944                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5945                                 /*
5946                                  * Couldn't find our skinny metadata item,
5947                                  * see if we have ye olde extent item.
5948                                  */
5949                                 path->slots[0]--;
5950                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5951                                                       path->slots[0]);
5952                                 if (key.objectid == bytenr &&
5953                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5954                                     key.offset == num_bytes)
5955                                         ret = 0;
5956                         }
5957
5958                         if (ret > 0 && skinny_metadata) {
5959                                 skinny_metadata = false;
5960                                 key.objectid = bytenr;
5961                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5962                                 key.offset = num_bytes;
5963                                 btrfs_release_path(path);
5964                                 ret = btrfs_search_slot(trans, extent_root,
5965                                                         &key, path, -1, 1);
5966                         }
5967
5968                         if (ret) {
5969                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5970                                         ret, bytenr);
5971                                 if (ret > 0)
5972                                         btrfs_print_leaf(extent_root,
5973                                                          path->nodes[0]);
5974                         }
5975                         if (ret < 0) {
5976                                 btrfs_abort_transaction(trans, extent_root, ret);
5977                                 goto out;
5978                         }
5979                         extent_slot = path->slots[0];
5980                 }
5981         } else if (WARN_ON(ret == -ENOENT)) {
5982                 btrfs_print_leaf(extent_root, path->nodes[0]);
5983                 btrfs_err(info,
5984                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5985                         bytenr, parent, root_objectid, owner_objectid,
5986                         owner_offset);
5987                 btrfs_abort_transaction(trans, extent_root, ret);
5988                 goto out;
5989         } else {
5990                 btrfs_abort_transaction(trans, extent_root, ret);
5991                 goto out;
5992         }
5993
5994         leaf = path->nodes[0];
5995         item_size = btrfs_item_size_nr(leaf, extent_slot);
5996 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5997         if (item_size < sizeof(*ei)) {
5998                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5999                 ret = convert_extent_item_v0(trans, extent_root, path,
6000                                              owner_objectid, 0);
6001                 if (ret < 0) {
6002                         btrfs_abort_transaction(trans, extent_root, ret);
6003                         goto out;
6004                 }
6005
6006                 btrfs_release_path(path);
6007                 path->leave_spinning = 1;
6008
6009                 key.objectid = bytenr;
6010                 key.type = BTRFS_EXTENT_ITEM_KEY;
6011                 key.offset = num_bytes;
6012
6013                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6014                                         -1, 1);
6015                 if (ret) {
6016                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6017                                 ret, bytenr);
6018                         btrfs_print_leaf(extent_root, path->nodes[0]);
6019                 }
6020                 if (ret < 0) {
6021                         btrfs_abort_transaction(trans, extent_root, ret);
6022                         goto out;
6023                 }
6024
6025                 extent_slot = path->slots[0];
6026                 leaf = path->nodes[0];
6027                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6028         }
6029 #endif
6030         BUG_ON(item_size < sizeof(*ei));
6031         ei = btrfs_item_ptr(leaf, extent_slot,
6032                             struct btrfs_extent_item);
6033         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6034             key.type == BTRFS_EXTENT_ITEM_KEY) {
6035                 struct btrfs_tree_block_info *bi;
6036                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6037                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6038                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6039         }
6040
6041         refs = btrfs_extent_refs(leaf, ei);
6042         if (refs < refs_to_drop) {
6043                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6044                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6045                 ret = -EINVAL;
6046                 btrfs_abort_transaction(trans, extent_root, ret);
6047                 goto out;
6048         }
6049         refs -= refs_to_drop;
6050
6051         if (refs > 0) {
6052                 type = BTRFS_QGROUP_OPER_SUB_SHARED;
6053                 if (extent_op)
6054                         __run_delayed_extent_op(extent_op, leaf, ei);
6055                 /*
6056                  * In the case of an inline back ref, the reference
6057                  * count will be updated by remove_extent_backref
6058                  */
6059                 if (iref) {
6060                         BUG_ON(!found_extent);
6061                 } else {
6062                         btrfs_set_extent_refs(leaf, ei, refs);
6063                         btrfs_mark_buffer_dirty(leaf);
6064                 }
6065                 if (found_extent) {
6066                         ret = remove_extent_backref(trans, extent_root, path,
6067                                                     iref, refs_to_drop,
6068                                                     is_data, &last_ref);
6069                         if (ret) {
6070                                 btrfs_abort_transaction(trans, extent_root, ret);
6071                                 goto out;
6072                         }
6073                 }
6074                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6075                                  root_objectid);
6076         } else {
6077                 if (found_extent) {
6078                         BUG_ON(is_data && refs_to_drop !=
6079                                extent_data_ref_count(root, path, iref));
6080                         if (iref) {
6081                                 BUG_ON(path->slots[0] != extent_slot);
6082                         } else {
6083                                 BUG_ON(path->slots[0] != extent_slot + 1);
6084                                 path->slots[0] = extent_slot;
6085                                 num_to_del = 2;
6086                         }
6087                 }
6088
6089                 last_ref = 1;
6090                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6091                                       num_to_del);
6092                 if (ret) {
6093                         btrfs_abort_transaction(trans, extent_root, ret);
6094                         goto out;
6095                 }
6096                 btrfs_release_path(path);
6097
6098                 if (is_data) {
6099                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6100                         if (ret) {
6101                                 btrfs_abort_transaction(trans, extent_root, ret);
6102                                 goto out;
6103                         }
6104                 }
6105
6106                 ret = update_block_group(root, bytenr, num_bytes, 0);
6107                 if (ret) {
6108                         btrfs_abort_transaction(trans, extent_root, ret);
6109                         goto out;
6110                 }
6111         }
6112         btrfs_release_path(path);
6113
6114         /* Deal with the quota accounting */
6115         if (!ret && last_ref && !no_quota) {
6116                 int mod_seq = 0;
6117
6118                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6119                     type == BTRFS_QGROUP_OPER_SUB_SHARED)
6120                         mod_seq = 1;
6121
6122                 ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6123                                               bytenr, num_bytes, type,
6124                                               mod_seq);
6125         }
6126 out:
6127         btrfs_free_path(path);
6128         return ret;
6129 }
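
/*
 * Illustrative summary of the function above: once the backref is
 * located there are two paths.  If refs remain after the drop, only
 * the ref count is updated (plus removal of one backref); if this was
 * the last ref, the extent item itself is deleted, csums are dropped
 * for data extents, and the space is returned via update_block_group().
 */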
6130
6131 /*
6132  * when we free a block, it is possible (and likely) that we free the last
6133  * delayed ref for that extent as well.  This searches the delayed ref tree for
6134  * a given extent, and if there are no other delayed refs to be processed, it
6135  * removes it from the tree.
6136  */
6137 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6138                                       struct btrfs_root *root, u64 bytenr)
6139 {
6140         struct btrfs_delayed_ref_head *head;
6141         struct btrfs_delayed_ref_root *delayed_refs;
6142         int ret = 0;
6143
6144         delayed_refs = &trans->transaction->delayed_refs;
6145         spin_lock(&delayed_refs->lock);
6146         head = btrfs_find_delayed_ref_head(trans, bytenr);
6147         if (!head)
6148                 goto out_delayed_unlock;
6149
6150         spin_lock(&head->lock);
6151         if (rb_first(&head->ref_root))
6152                 goto out;
6153
6154         if (head->extent_op) {
6155                 if (!head->must_insert_reserved)
6156                         goto out;
6157                 btrfs_free_delayed_extent_op(head->extent_op);
6158                 head->extent_op = NULL;
6159         }
6160
6161         /*
6162          * waiting for the lock here would deadlock.  If someone else has it
6163  * locked, they are already in the process of dropping it anyway
6164          */
6165         if (!mutex_trylock(&head->mutex))
6166                 goto out;
6167
6168         /*
6169          * at this point we have a head with no other entries.  Go
6170          * ahead and process it.
6171          */
6172         head->node.in_tree = 0;
6173         rb_erase(&head->href_node, &delayed_refs->href_root);
6174
6175         atomic_dec(&delayed_refs->num_entries);
6176
6177         /*
6178          * we don't take a ref on the node because we're removing it from the
6179          * tree, so we just steal the ref the tree was holding.
6180          */
6181         delayed_refs->num_heads--;
6182         if (head->processing == 0)
6183                 delayed_refs->num_heads_ready--;
6184         head->processing = 0;
6185         spin_unlock(&head->lock);
6186         spin_unlock(&delayed_refs->lock);
6187
6188         BUG_ON(head->extent_op);
6189         if (head->must_insert_reserved)
6190                 ret = 1;
6191
6192         mutex_unlock(&head->mutex);
6193         btrfs_put_delayed_ref(&head->node);
6194         return ret;
6195 out:
6196         spin_unlock(&head->lock);
6197
6198 out_delayed_unlock:
6199         spin_unlock(&delayed_refs->lock);
6200         return 0;
6201 }
6202
6203 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6204                            struct btrfs_root *root,
6205                            struct extent_buffer *buf,
6206                            u64 parent, int last_ref)
6207 {
6208         struct btrfs_block_group_cache *cache = NULL;
6209         int pin = 1;
6210         int ret;
6211
6212         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6213                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6214                                         buf->start, buf->len,
6215                                         parent, root->root_key.objectid,
6216                                         btrfs_header_level(buf),
6217                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6218                 BUG_ON(ret); /* -ENOMEM */
6219         }
6220
6221         if (!last_ref)
6222                 return;
6223
6224         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6225
6226         if (btrfs_header_generation(buf) == trans->transid) {
6227                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6228                         ret = check_ref_cleanup(trans, root, buf->start);
6229                         if (!ret)
6230                                 goto out;
6231                 }
6232
6233                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6234                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6235                         goto out;
6236                 }
6237
6238                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6239
6240                 btrfs_add_free_space(cache, buf->start, buf->len);
6241                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6242                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6243                 pin = 0;
6244         }
6245 out:
6246         if (pin)
6247                 add_pinned_bytes(root->fs_info, buf->len,
6248                                  btrfs_header_level(buf),
6249                                  root->root_key.objectid);
6250
6251         /*
6252          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6253          * anymore.
6254          */
6255         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6256         btrfs_put_block_group(cache);
6257 }
6258
6259 /* Can return -ENOMEM */
6260 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6261                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6262                       u64 owner, u64 offset, int no_quota)
6263 {
6264         int ret;
6265         struct btrfs_fs_info *fs_info = root->fs_info;
6266
6267         if (btrfs_test_is_dummy_root(root))
6268                 return 0;
6269
6270         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6271
6272         /*
6273          * tree log blocks never actually go into the extent allocation
6274          * tree, just update pinning info and exit early.
6275          */
6276         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6277                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6278                 /* unlocks the pinned mutex */
6279                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6280                 ret = 0;
6281         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6282                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6283                                         num_bytes,
6284                                         parent, root_objectid, (int)owner,
6285                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6286         } else {
6287                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6288                                                 num_bytes,
6289                                                 parent, root_objectid, owner,
6290                                                 offset, BTRFS_DROP_DELAYED_REF,
6291                                                 NULL, no_quota);
6292         }
6293         return ret;
6294 }
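
/*
 * Illustrative sketch (hypothetical values): dropping the single ref
 * held on a file data extent from a normal subvolume might look like:
 *
 *	ret = btrfs_free_extent(trans, root, disk_bytenr, disk_num_bytes,
 *				0, root->root_key.objectid,
 *				btrfs_ino(inode), file_offset, 0);
 *
 * which queues a BTRFS_DROP_DELAYED_REF through
 * btrfs_add_delayed_data_ref() instead of touching the extent tree
 * directly.
 */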
6295
6296 /*
6297  * when we wait for progress in the block group caching, it's because
6298  * our allocation attempt failed at least once.  So, we must sleep
6299  * and let some progress happen before we try again.
6300  *
6301  * This function will sleep at least once waiting for new free space to
6302  * show up, and then it will check the block group free space numbers
6303  * for our min num_bytes.  Another option is to have it go ahead
6304  * and look in the rbtree for a free extent of a given size, but this
6305  * is a good start.
6306  *
6307  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6308  * any of the information in this block group.
6309  */
6310 static noinline void
6311 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6312                                 u64 num_bytes)
6313 {
6314         struct btrfs_caching_control *caching_ctl;
6315
6316         caching_ctl = get_caching_control(cache);
6317         if (!caching_ctl)
6318                 return;
6319
6320         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6321                    (cache->free_space_ctl->free_space >= num_bytes));
6322
6323         put_caching_control(caching_ctl);
6324 }
6325
6326 static noinline int
6327 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6328 {
6329         struct btrfs_caching_control *caching_ctl;
6330         int ret = 0;
6331
6332         caching_ctl = get_caching_control(cache);
6333         if (!caching_ctl)
6334                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6335
6336         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6337         if (cache->cached == BTRFS_CACHE_ERROR)
6338                 ret = -EIO;
6339         put_caching_control(caching_ctl);
6340         return ret;
6341 }
6342
6343 int __get_raid_index(u64 flags)
6344 {
6345         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6346                 return BTRFS_RAID_RAID10;
6347         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6348                 return BTRFS_RAID_RAID1;
6349         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6350                 return BTRFS_RAID_DUP;
6351         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6352                 return BTRFS_RAID_RAID0;
6353         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6354                 return BTRFS_RAID_RAID5;
6355         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6356                 return BTRFS_RAID_RAID6;
6357
6358         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6359 }
6360
6361 int get_block_group_index(struct btrfs_block_group_cache *cache)
6362 {
6363         return __get_raid_index(cache->flags);
6364 }
6365
6366 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6367         [BTRFS_RAID_RAID10]     = "raid10",
6368         [BTRFS_RAID_RAID1]      = "raid1",
6369         [BTRFS_RAID_DUP]        = "dup",
6370         [BTRFS_RAID_RAID0]      = "raid0",
6371         [BTRFS_RAID_SINGLE]     = "single",
6372         [BTRFS_RAID_RAID5]      = "raid5",
6373         [BTRFS_RAID_RAID6]      = "raid6",
6374 };
6375
6376 static const char *get_raid_name(enum btrfs_raid_types type)
6377 {
6378         if (type >= BTRFS_NR_RAID_TYPES)
6379                 return NULL;
6380
6381         return btrfs_raid_type_names[type];
6382 }
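
/*
 * Illustrative example: for a DUP block group,
 * __get_raid_index(BTRFS_BLOCK_GROUP_DUP) returns BTRFS_RAID_DUP and
 * get_raid_name(BTRFS_RAID_DUP) returns "dup".
 */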
6383
6384 enum btrfs_loop_type {
6385         LOOP_CACHING_NOWAIT = 0,
6386         LOOP_CACHING_WAIT = 1,
6387         LOOP_ALLOC_CHUNK = 2,
6388         LOOP_NO_EMPTY_SIZE = 3,
6389 };
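
/*
 * Illustrative note: find_free_extent() below walks these stages in
 * increasing order, so a chunk allocation is only forced
 * (LOOP_ALLOC_CHUNK) after allocating from cached and still-caching
 * block groups has failed, and the empty_size/empty_cluster padding
 * is only given up at LOOP_NO_EMPTY_SIZE.
 */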
6390
6391 static inline void
6392 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6393                        int delalloc)
6394 {
6395         if (delalloc)
6396                 down_read(&cache->data_rwsem);
6397 }
6398
6399 static inline void
6400 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6401                        int delalloc)
6402 {
6403         btrfs_get_block_group(cache);
6404         if (delalloc)
6405                 down_read(&cache->data_rwsem);
6406 }
6407
6408 static struct btrfs_block_group_cache *
6409 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6410                    struct btrfs_free_cluster *cluster,
6411                    int delalloc)
6412 {
6413         struct btrfs_block_group_cache *used_bg;
6414         bool locked = false;
6415 again:
6416         spin_lock(&cluster->refill_lock);
6417         if (locked) {
6418                 if (used_bg == cluster->block_group)
6419                         return used_bg;
6420
6421                 up_read(&used_bg->data_rwsem);
6422                 btrfs_put_block_group(used_bg);
6423         }
6424
6425         used_bg = cluster->block_group;
6426         if (!used_bg)
6427                 return NULL;
6428
6429         if (used_bg == block_group)
6430                 return used_bg;
6431
6432         btrfs_get_block_group(used_bg);
6433
6434         if (!delalloc)
6435                 return used_bg;
6436
6437         if (down_read_trylock(&used_bg->data_rwsem))
6438                 return used_bg;
6439
6440         spin_unlock(&cluster->refill_lock);
6441         down_read(&used_bg->data_rwsem);
6442         locked = true;
6443         goto again;
6444 }
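
/*
 * Illustrative note on the retry loop above: used_bg->data_rwsem must
 * not be taken while cluster->refill_lock is held, so when the trylock
 * fails we drop the refill lock, block on the rwsem, and then re-check
 * that the cluster still points at the same block group.
 */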
6445
6446 static inline void
6447 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6448                          int delalloc)
6449 {
6450         if (delalloc)
6451                 up_read(&cache->data_rwsem);
6452         btrfs_put_block_group(cache);
6453 }
6454
6455 /*
6456  * walks the btree of allocated extents and finds a hole of a given size.
6457  * The key ins is changed to record the hole:
6458  * ins->objectid == start position
6459  * ins->flags = BTRFS_EXTENT_ITEM_KEY
6460  * ins->offset == the size of the hole.
6461  * Any available blocks before search_start are skipped.
6462  *
6463  * If there is no suitable free space, we will record the max size of
6464  * the largest free space extent we found.
6465  */
6466 static noinline int find_free_extent(struct btrfs_root *orig_root,
6467                                      u64 num_bytes, u64 empty_size,
6468                                      u64 hint_byte, struct btrfs_key *ins,
6469                                      u64 flags, int delalloc)
6470 {
6471         int ret = 0;
6472         struct btrfs_root *root = orig_root->fs_info->extent_root;
6473         struct btrfs_free_cluster *last_ptr = NULL;
6474         struct btrfs_block_group_cache *block_group = NULL;
6475         u64 search_start = 0;
6476         u64 max_extent_size = 0;
6477         int empty_cluster = 2 * 1024 * 1024;
6478         struct btrfs_space_info *space_info;
6479         int loop = 0;
6480         int index = __get_raid_index(flags);
6481         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6482                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6483         bool failed_cluster_refill = false;
6484         bool failed_alloc = false;
6485         bool use_cluster = true;
6486         bool have_caching_bg = false;
6487
6488         WARN_ON(num_bytes < root->sectorsize);
6489         ins->type = BTRFS_EXTENT_ITEM_KEY;
6490         ins->objectid = 0;
6491         ins->offset = 0;
6492
6493         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6494
6495         space_info = __find_space_info(root->fs_info, flags);
6496         if (!space_info) {
6497                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6498                 return -ENOSPC;
6499         }
6500
6501         /*
6502          * If the space info is for both data and metadata it means we have a
6503          * small filesystem and we can't use the clustering stuff.
6504          */
6505         if (btrfs_mixed_space_info(space_info))
6506                 use_cluster = false;
6507
6508         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6509                 last_ptr = &root->fs_info->meta_alloc_cluster;
6510                 if (!btrfs_test_opt(root, SSD))
6511                         empty_cluster = 64 * 1024;
6512         }
6513
6514         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6515             btrfs_test_opt(root, SSD)) {
6516                 last_ptr = &root->fs_info->data_alloc_cluster;
6517         }
6518
6519         if (last_ptr) {
6520                 spin_lock(&last_ptr->lock);
6521                 if (last_ptr->block_group)
6522                         hint_byte = last_ptr->window_start;
6523                 spin_unlock(&last_ptr->lock);
6524         }
6525
6526         search_start = max(search_start, first_logical_byte(root, 0));
6527         search_start = max(search_start, hint_byte);
6528
6529         if (!last_ptr)
6530                 empty_cluster = 0;
6531
6532         if (search_start == hint_byte) {
6533                 block_group = btrfs_lookup_block_group(root->fs_info,
6534                                                        search_start);
6535                 /*
6536                  * we don't want to use the block group if it doesn't match our
6537                  * allocation bits, or if it's not cached.
6538                  *
6539                  * However if we are re-searching with an ideal block group
6540                  * picked out then we don't care that the block group is cached.
6541                  */
6542                 if (block_group && block_group_bits(block_group, flags) &&
6543                     block_group->cached != BTRFS_CACHE_NO) {
6544                         down_read(&space_info->groups_sem);
6545                         if (list_empty(&block_group->list) ||
6546                             block_group->ro) {
6547                                 /*
6548                                  * someone is removing this block group,
6549                                  * we can't jump to the have_block_group
6550                                  * label because our list pointers are not
6551                                  * valid
6552                                  */
6553                                 btrfs_put_block_group(block_group);
6554                                 up_read(&space_info->groups_sem);
6555                         } else {
6556                                 index = get_block_group_index(block_group);
6557                                 btrfs_lock_block_group(block_group, delalloc);
6558                                 goto have_block_group;
6559                         }
6560                 } else if (block_group) {
6561                         btrfs_put_block_group(block_group);
6562                 }
6563         }
6564 search:
6565         have_caching_bg = false;
6566         down_read(&space_info->groups_sem);
6567         list_for_each_entry(block_group, &space_info->block_groups[index],
6568                             list) {
6569                 u64 offset;
6570                 int cached;
6571
6572                 btrfs_grab_block_group(block_group, delalloc);
6573                 search_start = block_group->key.objectid;
6574
6575                 /*
6576                  * this can happen if we end up cycling through all the
6577                  * raid types, but we want to make sure we only allocate
6578                  * for the proper type.
6579                  */
6580                 if (!block_group_bits(block_group, flags)) {
6581                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
6582                                 BTRFS_BLOCK_GROUP_RAID1 |
6583                                 BTRFS_BLOCK_GROUP_RAID5 |
6584                                 BTRFS_BLOCK_GROUP_RAID6 |
6585                                 BTRFS_BLOCK_GROUP_RAID10;
6586
6587                         /*
6588                          * if they asked for extra copies and this block group
6589                          * doesn't provide them, bail.  This does allow us to
6590                          * fill raid0 from raid1.
6591                          */
6592                         if ((flags & extra) && !(block_group->flags & extra))
6593                                 goto loop;
6594                 }
6595
6596 have_block_group:
6597                 cached = block_group_cache_done(block_group);
6598                 if (unlikely(!cached)) {
6599                         ret = cache_block_group(block_group, 0);
6600                         BUG_ON(ret < 0);
6601                         ret = 0;
6602                 }
6603
6604                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6605                         goto loop;
6606                 if (unlikely(block_group->ro))
6607                         goto loop;
6608
6609                 /*
6610                  * Ok, we want to try and use the cluster allocator, so
6611                  * let's look there
6612                  */
6613                 if (last_ptr) {
6614                         struct btrfs_block_group_cache *used_block_group;
6615                         unsigned long aligned_cluster;
6616                         /*
6617                          * the refill lock keeps out other
6618                          * people trying to start a new cluster
6619                          */
6620                         used_block_group = btrfs_lock_cluster(block_group,
6621                                                               last_ptr,
6622                                                               delalloc);
6623                         if (!used_block_group)
6624                                 goto refill_cluster;
6625
6626                         if (used_block_group != block_group &&
6627                             (used_block_group->ro ||
6628                              !block_group_bits(used_block_group, flags)))
6629                                 goto release_cluster;
6630
6631                         offset = btrfs_alloc_from_cluster(used_block_group,
6632                                                 last_ptr,
6633                                                 num_bytes,
6634                                                 used_block_group->key.objectid,
6635                                                 &max_extent_size);
6636                         if (offset) {
6637                                 /* we have a block, we're done */
6638                                 spin_unlock(&last_ptr->refill_lock);
6639                                 trace_btrfs_reserve_extent_cluster(root,
6640                                                 used_block_group,
6641                                                 search_start, num_bytes);
6642                                 if (used_block_group != block_group) {
6643                                         btrfs_release_block_group(block_group,
6644                                                                   delalloc);
6645                                         block_group = used_block_group;
6646                                 }
6647                                 goto checks;
6648                         }
6649
6650                         WARN_ON(last_ptr->block_group != used_block_group);
6651 release_cluster:
6652                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6653                          * set up a new cluster, so let's just skip it
6654                          * and let the allocator find whatever block
6655                          * it can find.  If we reach this point, we
6656                          * will have tried the cluster allocator
6657                          * plenty of times and not have found
6658                          * anything, so we are likely way too
6659                          * fragmented for the clustering stuff to find
6660                          * anything.
6661                          *
6662                          * However, if the cluster is taken from the
6663                          * current block group, release the cluster
6664                          * first, so that we stand a better chance of
6665                          * succeeding in the unclustered
6666                          * allocation.  */
6667                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6668                             used_block_group != block_group) {
6669                                 spin_unlock(&last_ptr->refill_lock);
6670                                 btrfs_release_block_group(used_block_group,
6671                                                           delalloc);
6672                                 goto unclustered_alloc;
6673                         }
6674
6675                         /*
6676                          * this cluster didn't work out, free it and
6677                          * start over
6678                          */
6679                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6680
6681                         if (used_block_group != block_group)
6682                                 btrfs_release_block_group(used_block_group,
6683                                                           delalloc);
6684 refill_cluster:
6685                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6686                                 spin_unlock(&last_ptr->refill_lock);
6687                                 goto unclustered_alloc;
6688                         }
6689
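                        /*
                         * amount of free space to gather around the
                         * allocation: the configured padding, but at
                         * least one full stripe, which matters for
                         * raid5/6 block groups where sub-stripe writes
                         * would trigger read-modify-write cycles.
                         */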
6690                         aligned_cluster = max_t(unsigned long,
6691                                                 empty_cluster + empty_size,
6692                                                 block_group->full_stripe_len);
6693
6694                         /* allocate a cluster in this block group */
6695                         ret = btrfs_find_space_cluster(root, block_group,
6696                                                        last_ptr, search_start,
6697                                                        num_bytes,
6698                                                        aligned_cluster);
6699                         if (ret == 0) {
6700                                 /*
6701                                  * now pull our allocation out of this
6702                                  * cluster
6703                                  */
6704                                 offset = btrfs_alloc_from_cluster(block_group,
6705                                                         last_ptr,
6706                                                         num_bytes,
6707                                                         search_start,
6708                                                         &max_extent_size);
6709                                 if (offset) {
6710                                         /* we found one, proceed */
6711                                         spin_unlock(&last_ptr->refill_lock);
6712                                         trace_btrfs_reserve_extent_cluster(root,
6713                                                 block_group, search_start,
6714                                                 num_bytes);
6715                                         goto checks;
6716                                 }
6717                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6718                                    && !failed_cluster_refill) {
6719                                 spin_unlock(&last_ptr->refill_lock);
6720
6721                                 failed_cluster_refill = true;
6722                                 wait_block_group_cache_progress(block_group,
6723                                        num_bytes + empty_cluster + empty_size);
6724                                 goto have_block_group;
6725                         }
6726
6727                         /*
6728                          * at this point we either didn't find a cluster
6729                          * or we weren't able to allocate a block from our
6730                          * cluster.  Free the cluster we've been trying
6731                          * to use, and go to the next block group
6732                          */
6733                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6734                         spin_unlock(&last_ptr->refill_lock);
6735                         goto loop;
6736                 }
6737
6738 unclustered_alloc:
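                /*
                 * once caching has finished, the free-space total is exact,
                 * so we can cheaply reject this block group when it cannot
                 * possibly satisfy the request; remember the largest
                 * free-space figure seen so ENOSPC handling can retry with
                 * a smaller size.
                 */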
6739                 spin_lock(&block_group->free_space_ctl->tree_lock);
6740                 if (cached &&
6741                     block_group->free_space_ctl->free_space <
6742                     num_bytes + empty_cluster + empty_size) {
6743                         if (block_group->free_space_ctl->free_space >
6744                             max_extent_size)
6745                                 max_extent_size =
6746                                         block_group->free_space_ctl->free_space;
6747                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6748                         goto loop;
6749                 }
6750                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6751
6752                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6753                                                     num_bytes, empty_size,
6754                                                     &max_extent_size);
6755                 /*
6756                  * If we didn't find a chunk, and we haven't failed on this
6757                  * block group before, and this block group is in the middle of
6758                  * caching and we are ok with waiting, then go ahead and wait
6759                  * for progress to be made, and set failed_alloc to true.
6760                  *
6761                  * If failed_alloc is true then we've already waited on this
6762                  * block group once and should move on to the next block group.
6763                  */
6764                 if (!offset && !failed_alloc && !cached &&
6765                     loop > LOOP_CACHING_NOWAIT) {
6766                         wait_block_group_cache_progress(block_group,
6767                                                 num_bytes + empty_size);
6768                         failed_alloc = true;
6769                         goto have_block_group;
6770                 } else if (!offset) {
6771                         if (!cached)
6772                                 have_caching_bg = true;
6773                         goto loop;
6774                 }
6775 checks:
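                /*
                 * found a candidate: align it up to the stripe size, make
                 * sure the aligned range still fits inside this block
                 * group, and give any alignment slack back to the free
                 * space cache before reserving.
                 */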
6776                 search_start = ALIGN(offset, root->stripesize);
6777
6778                 /* move on to the next group */
6779                 if (search_start + num_bytes >
6780                     block_group->key.objectid + block_group->key.offset) {
6781                         btrfs_add_free_space(block_group, offset, num_bytes);
6782                         goto loop;
6783                 }
6784
6785                 if (offset < search_start)
6786                         btrfs_add_free_space(block_group, offset,
6787                                              search_start - offset);
6788                 BUG_ON(offset > search_start);
6789
6790                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6791                                                   alloc_type, delalloc);
6792                 if (ret == -EAGAIN) {
6793                         btrfs_add_free_space(block_group, offset, num_bytes);
6794                         goto loop;
6795                 }
6796
6797                 /* we are all good, let's return */
6798                 ins->objectid = search_start;
6799                 ins->offset = num_bytes;
6800
6801                 trace_btrfs_reserve_extent(orig_root, block_group,
6802                                            search_start, num_bytes);
6803                 btrfs_release_block_group(block_group, delalloc);
6804                 break;
6805 loop:
6806                 failed_cluster_refill = false;
6807                 failed_alloc = false;
6808                 BUG_ON(index != get_block_group_index(block_group));
6809                 btrfs_release_block_group(block_group, delalloc);
6810         }
6811         up_read(&space_info->groups_sem);
6812
6813         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6814                 goto search;
6815
6816         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6817                 goto search;
6818
6819         /*
6820          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6821          *                      caching kthreads as we move along
6822          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6823          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6824          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6825          *                      again
6826          */
6827         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6828                 index = 0;
6829                 loop++;
6830                 if (loop == LOOP_ALLOC_CHUNK) {
6831                         struct btrfs_trans_handle *trans;
6832                         int exist = 0;
6833
6834                         trans = current->journal_info;
6835                         if (trans)
6836                                 exist = 1;
6837                         else
6838                                 trans = btrfs_join_transaction(root);
6839
6840                         if (IS_ERR(trans)) {
6841                                 ret = PTR_ERR(trans);
6842                                 goto out;
6843                         }
6844
6845                         ret = do_chunk_alloc(trans, root, flags,
6846                                              CHUNK_ALLOC_FORCE);
6847                         /*
6848                          * Do not bail out on ENOSPC: the remaining
6849                          * loop stages may still satisfy the request.
6850                          */
6851                         if (ret < 0 && ret != -ENOSPC)
6852                                 btrfs_abort_transaction(trans,
6853                                                         root, ret);
6854                         else
6855                                 ret = 0;
6856                         if (!exist)
6857                                 btrfs_end_transaction(trans, root);
6858                         if (ret)
6859                                 goto out;
6860                 }
6861
6862                 if (loop == LOOP_NO_EMPTY_SIZE) {
6863                         empty_size = 0;
6864                         empty_cluster = 0;
6865                 }
6866
6867                 goto search;
6868         } else if (!ins->objectid) {
6869                 ret = -ENOSPC;
6870         } else {
6871                 ret = 0;
6872         }
6873 out:
6874         if (ret == -ENOSPC)
6875                 ins->offset = max_extent_size;
6876         return ret;
6877 }
6878
6879 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6880                             int dump_block_groups)
6881 {
6882         struct btrfs_block_group_cache *cache;
6883         int index = 0;
6884
6885         spin_lock(&info->lock);
6886         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6887                info->flags,
6888                info->total_bytes - info->bytes_used - info->bytes_pinned -
6889                info->bytes_reserved - info->bytes_readonly,
6890                (info->full) ? "" : "not ");
6891         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6892                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6893                info->total_bytes, info->bytes_used, info->bytes_pinned,
6894                info->bytes_reserved, info->bytes_may_use,
6895                info->bytes_readonly);
6896         spin_unlock(&info->lock);
6897
6898         if (!dump_block_groups)
6899                 return;
6900
6901         down_read(&info->groups_sem);
6902 again:
6903         list_for_each_entry(cache, &info->block_groups[index], list) {
6904                 spin_lock(&cache->lock);
6905                 printk(KERN_INFO "BTRFS: "
6906                            "block group %llu has %llu bytes, "
6907                            "%llu used %llu pinned %llu reserved %s\n",
6908                        cache->key.objectid, cache->key.offset,
6909                        btrfs_block_group_used(&cache->item), cache->pinned,
6910                        cache->reserved, cache->ro ? "[readonly]" : "");
6911                 btrfs_dump_free_space(cache, bytes);
6912                 spin_unlock(&cache->lock);
6913         }
6914         if (++index < BTRFS_NR_RAID_TYPES)
6915                 goto again;
6916         up_read(&info->groups_sem);
6917 }
6918
6919 int btrfs_reserve_extent(struct btrfs_root *root,
6920                          u64 num_bytes, u64 min_alloc_size,
6921                          u64 empty_size, u64 hint_byte,
6922                          struct btrfs_key *ins, int is_data, int delalloc)
6923 {
6924         bool final_tried = false;
6925         u64 flags;
6926         int ret;
6927
6928         flags = btrfs_get_alloc_profile(root, is_data);
6929 again:
6930         WARN_ON(num_bytes < root->sectorsize);
6931         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6932                                flags, delalloc);
6933
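        /*
         * on ENOSPC, retry with progressively smaller sizes: halve the
         * request (capped by the largest free extent the search reported
         * back through ins->offset) until min_alloc_size is reached,
         * which is the final attempt.
         */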
6934         if (ret == -ENOSPC) {
6935                 if (!final_tried && ins->offset) {
6936                         num_bytes = min(num_bytes >> 1, ins->offset);
6937                         num_bytes = round_down(num_bytes, root->sectorsize);
6938                         num_bytes = max(num_bytes, min_alloc_size);
6939                         if (num_bytes == min_alloc_size)
6940                                 final_tried = true;
6941                         goto again;
6942                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6943                         struct btrfs_space_info *sinfo;
6944
6945                         sinfo = __find_space_info(root->fs_info, flags);
6946                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6947                                 flags, num_bytes);
6948                         if (sinfo)
6949                                 dump_space_info(sinfo, num_bytes, 1);
6950                 }
6951         }
6952
6953         return ret;
6954 }
6955
6956 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6957                                         u64 start, u64 len,
6958                                         int pin, int delalloc)
6959 {
6960         struct btrfs_block_group_cache *cache;
6961         int ret = 0;
6962
6963         cache = btrfs_lookup_block_group(root->fs_info, start);
6964         if (!cache) {
6965                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6966                         start);
6967                 return -ENOSPC;
6968         }
6969
6970         if (btrfs_test_opt(root, DISCARD))
6971                 ret = btrfs_discard_extent(root, start, len, NULL);
6972
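        /*
         * pinned extents are returned to the free space cache only at
         * transaction commit, which is needed when the extent may still
         * be referenced until then; otherwise hand the space back
         * immediately and drop the reservation.
         */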
6973         if (pin)
6974                 pin_down_extent(root, cache, start, len, 1);
6975         else {
6976                 btrfs_add_free_space(cache, start, len);
6977                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6978         }
6979         btrfs_put_block_group(cache);
6980
6981         trace_btrfs_reserved_extent_free(root, start, len);
6982
6983         return ret;
6984 }
6985
6986 int btrfs_free_reserved_extent(struct btrfs_root *root,
6987                                u64 start, u64 len, int delalloc)
6988 {
6989         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6990 }
6991
6992 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6993                                        u64 start, u64 len)
6994 {
6995         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6996 }
6997
6998 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6999                                       struct btrfs_root *root,
7000                                       u64 parent, u64 root_objectid,
7001                                       u64 flags, u64 owner, u64 offset,
7002                                       struct btrfs_key *ins, int ref_mod)
7003 {
7004         int ret;
7005         struct btrfs_fs_info *fs_info = root->fs_info;
7006         struct btrfs_extent_item *extent_item;
7007         struct btrfs_extent_inline_ref *iref;
7008         struct btrfs_path *path;
7009         struct extent_buffer *leaf;
7010         int type;
7011         u32 size;
7012
7013         if (parent > 0)
7014                 type = BTRFS_SHARED_DATA_REF_KEY;
7015         else
7016                 type = BTRFS_EXTENT_DATA_REF_KEY;
7017
7018         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7019
7020         path = btrfs_alloc_path();
7021         if (!path)
7022                 return -ENOMEM;
7023
7024         path->leave_spinning = 1;
7025         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7026                                       ins, size);
7027         if (ret) {
7028                 btrfs_free_path(path);
7029                 return ret;
7030         }
7031
7032         leaf = path->nodes[0];
7033         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7034                                      struct btrfs_extent_item);
7035         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7036         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7037         btrfs_set_extent_flags(leaf, extent_item,
7038                                flags | BTRFS_EXTENT_FLAG_DATA);
7039
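        /*
         * the backref is stored inline, immediately after the extent
         * item: a shared data ref keyed on the parent tree block, or a
         * full extent data ref carrying (root, inode objectid, offset).
         */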
7040         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7041         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7042         if (parent > 0) {
7043                 struct btrfs_shared_data_ref *ref;
7044                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7045                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7046                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7047         } else {
7048                 struct btrfs_extent_data_ref *ref;
7049                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7050                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7051                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7052                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7053                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7054         }
7055
7056         btrfs_mark_buffer_dirty(leaf);
7057         btrfs_free_path(path);
7058
7059         /* Always set parent to 0 here since it's exclusive anyway. */
7060         ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7061                                       ins->objectid, ins->offset,
7062                                       BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7063         if (ret)
7064                 return ret;
7065
7066         ret = update_block_group(root, ins->objectid, ins->offset, 1);
7067         if (ret) { /* -ENOENT, logic error */
7068                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7069                         ins->objectid, ins->offset);
7070                 BUG();
7071         }
7072         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7073         return ret;
7074 }
7075
7076 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7077                                      struct btrfs_root *root,
7078                                      u64 parent, u64 root_objectid,
7079                                      u64 flags, struct btrfs_disk_key *key,
7080                                      int level, struct btrfs_key *ins,
7081                                      int no_quota)
7082 {
7083         int ret;
7084         struct btrfs_fs_info *fs_info = root->fs_info;
7085         struct btrfs_extent_item *extent_item;
7086         struct btrfs_tree_block_info *block_info;
7087         struct btrfs_extent_inline_ref *iref;
7088         struct btrfs_path *path;
7089         struct extent_buffer *leaf;
7090         u32 size = sizeof(*extent_item) + sizeof(*iref);
7091         u64 num_bytes = ins->offset;
7092         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7093                                                  SKINNY_METADATA);
7094
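        /*
         * with the skinny-metadata incompat feature the key itself
         * encodes the level, so no btrfs_tree_block_info is stored and
         * the inline ref directly follows the extent item.
         */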
7095         if (!skinny_metadata)
7096                 size += sizeof(*block_info);
7097
7098         path = btrfs_alloc_path();
7099         if (!path) {
7100                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7101                                                    root->nodesize);
7102                 return -ENOMEM;
7103         }
7104
7105         path->leave_spinning = 1;
7106         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7107                                       ins, size);
7108         if (ret) {
7109                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7110                                                    root->nodesize);
7111                 btrfs_free_path(path);
7112                 return ret;
7113         }
7114
7115         leaf = path->nodes[0];
7116         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7117                                      struct btrfs_extent_item);
7118         btrfs_set_extent_refs(leaf, extent_item, 1);
7119         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7120         btrfs_set_extent_flags(leaf, extent_item,
7121                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7122
7123         if (skinny_metadata) {
7124                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7125                 num_bytes = root->nodesize;
7126         } else {
7127                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7128                 btrfs_set_tree_block_key(leaf, block_info, key);
7129                 btrfs_set_tree_block_level(leaf, block_info, level);
7130                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7131         }
7132
7133         if (parent > 0) {
7134                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7135                 btrfs_set_extent_inline_ref_type(leaf, iref,
7136                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7137                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7138         } else {
7139                 btrfs_set_extent_inline_ref_type(leaf, iref,
7140                                                  BTRFS_TREE_BLOCK_REF_KEY);
7141                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7142         }
7143
7144         btrfs_mark_buffer_dirty(leaf);
7145         btrfs_free_path(path);
7146
7147         if (!no_quota) {
7148                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7149                                               ins->objectid, num_bytes,
7150                                               BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7151                 if (ret)
7152                         return ret;
7153         }
7154
7155         ret = update_block_group(root, ins->objectid, root->nodesize, 1);
7156         if (ret) { /* -ENOENT, logic error */
7157                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7158                         ins->objectid, num_bytes);
7159                 BUG();
7160         }
7161
7162         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7163         return ret;
7164 }
7165
7166 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7167                                      struct btrfs_root *root,
7168                                      u64 root_objectid, u64 owner,
7169                                      u64 offset, struct btrfs_key *ins)
7170 {
7171         int ret;
7172
7173         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7174
7175         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7176                                          ins->offset, 0,
7177                                          root_objectid, owner, offset,
7178                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7179         return ret;
7180 }
7181
7182 /*
7183  * this is used by the tree logging recovery code.  It records that
7184  * an extent has been allocated and makes sure to clear the free
7185  * space cache bits as well
7186  */
7187 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7188                                    struct btrfs_root *root,
7189                                    u64 root_objectid, u64 owner, u64 offset,
7190                                    struct btrfs_key *ins)
7191 {
7192         int ret;
7193         struct btrfs_block_group_cache *block_group;
7194
7195         /*
7196          * Mixed block groups will exclude before processing the log so we only
7197          * need to do the exclude dance if this fs isn't mixed.
7198          */
7199         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7200                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7201                 if (ret)
7202                         return ret;
7203         }
7204
7205         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7206         if (!block_group)
7207                 return -EINVAL;
7208
7209         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7210                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7211         BUG_ON(ret); /* logic error */
7212         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7213                                          0, owner, offset, ins, 1);
7214         btrfs_put_block_group(block_group);
7215         return ret;
7216 }
7217
7218 static struct extent_buffer *
7219 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7220                       u64 bytenr, u32 blocksize, int level)
7221 {
7222         struct extent_buffer *buf;
7223
7224         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
7225         if (!buf)
7226                 return ERR_PTR(-ENOMEM);
7227         btrfs_set_header_generation(buf, trans->transid);
7228         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7229         btrfs_tree_lock(buf);
7230         clean_tree_block(trans, root, buf);
7231         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7232
7233         btrfs_set_lock_blocking(buf);
7234         btrfs_set_buffer_uptodate(buf);
7235
7236         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7237                 buf->log_index = root->log_transid % 2;
7238                 /*
7239                  * we allow two log transactions at a time, use different
7240                  * EXTENT bits to differentiate dirty pages.
7241                  */
7242                 if (buf->log_index == 0)
7243                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7244                                         buf->start + buf->len - 1, GFP_NOFS);
7245                 else
7246                         set_extent_new(&root->dirty_log_pages, buf->start,
7247                                         buf->start + buf->len - 1, GFP_NOFS);
7248         } else {
7249                 buf->log_index = -1;
7250                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7251                          buf->start + buf->len - 1, GFP_NOFS);
7252         }
7253         trans->blocks_used++;
7254         /* this returns a buffer locked for blocking */
7255         return buf;
7256 }
7257
7258 static struct btrfs_block_rsv *
7259 use_block_rsv(struct btrfs_trans_handle *trans,
7260               struct btrfs_root *root, u32 blocksize)
7261 {
7262         struct btrfs_block_rsv *block_rsv;
7263         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7264         int ret;
7265         bool global_updated = false;
7266
7267         block_rsv = get_block_rsv(trans, root);
7268
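        /*
         * fallback order: use the root's own rsv, refresh the global
         * rsv once and retry in case our numbers were stale, then try a
         * fresh NO_FLUSH reservation, and finally steal from the global
         * rsv if it tracks the same space_info.
         */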
7269         if (unlikely(block_rsv->size == 0))
7270                 goto try_reserve;
7271 again:
7272         ret = block_rsv_use_bytes(block_rsv, blocksize);
7273         if (!ret)
7274                 return block_rsv;
7275
7276         if (block_rsv->failfast)
7277                 return ERR_PTR(ret);
7278
7279         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7280                 global_updated = true;
7281                 update_global_block_rsv(root->fs_info);
7282                 goto again;
7283         }
7284
7285         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7286                 static DEFINE_RATELIMIT_STATE(_rs,
7287                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7288                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7289                 if (__ratelimit(&_rs))
7290                         WARN(1, KERN_DEBUG
7291                                 "BTRFS: block rsv returned %d\n", ret);
7292         }
7293 try_reserve:
7294         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7295                                      BTRFS_RESERVE_NO_FLUSH);
7296         if (!ret)
7297                 return block_rsv;
7298         /*
7299          * If we couldn't reserve metadata bytes, try to use some from
7300          * the global reserve, provided our rsv accounts space from the
7301          * same space_info as the global reservation.
7302          */
7303         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7304             block_rsv->space_info == global_rsv->space_info) {
7305                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7306                 if (!ret)
7307                         return global_rsv;
7308         }
7309         return ERR_PTR(ret);
7310 }
7311
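/*
 * give back a block-sized reservation that ended up unused: credit the
 * bytes to the rsv, then release anything above its target size back
 * to the space_info (or the global rsv).
 */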
7312 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7313                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7314 {
7315         block_rsv_add_bytes(block_rsv, blocksize, 0);
7316         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7317 }
7318
7319 /*
7320  * finds a free extent and does all the dirty work required for allocation.
7321  * returns the key for the extent through ins, and a locked tree buffer for
7322  * the first block of the extent.
7323  *
7324  * returns the tree buffer or an ERR_PTR on failure.
7325  */
7326 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7327                                         struct btrfs_root *root,
7328                                         u64 parent, u64 root_objectid,
7329                                         struct btrfs_disk_key *key, int level,
7330                                         u64 hint, u64 empty_size)
7331 {
7332         struct btrfs_key ins;
7333         struct btrfs_block_rsv *block_rsv;
7334         struct extent_buffer *buf;
7335         u64 flags = 0;
7336         int ret;
7337         u32 blocksize = root->nodesize;
7338         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7339                                                  SKINNY_METADATA);
7340
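        /*
         * dummy roots are only created by the sanity tests; hand back a
         * buffer at a linearly advancing fake bytenr instead of doing a
         * real allocation.
         */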
7341         if (btrfs_test_is_dummy_root(root)) {
7342                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7343                                             blocksize, level);
7344                 if (!IS_ERR(buf))
7345                         root->alloc_bytenr += blocksize;
7346                 return buf;
7347         }
7348
7349         block_rsv = use_block_rsv(trans, root, blocksize);
7350         if (IS_ERR(block_rsv))
7351                 return ERR_CAST(block_rsv);
7352
7353         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7354                                    empty_size, hint, &ins, 0, 0);
7355         if (ret) {
7356                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7357                 return ERR_PTR(ret);
7358         }
7359
7360         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7361                                     blocksize, level);
7362         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7363
7364         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7365                 if (parent == 0)
7366                         parent = ins.objectid;
7367                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7368         } else
7369                 BUG_ON(parent > 0);
7370
7371         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7372                 struct btrfs_delayed_extent_op *extent_op;
7373                 extent_op = btrfs_alloc_delayed_extent_op();
7374                 BUG_ON(!extent_op); /* -ENOMEM */
7375                 if (key)
7376                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7377                 else
7378                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7379                 extent_op->flags_to_set = flags;
7380                 if (skinny_metadata)
7381                         extent_op->update_key = 0;
7382                 else
7383                         extent_op->update_key = 1;
7384                 extent_op->update_flags = 1;
7385                 extent_op->is_data = 0;
7386                 extent_op->level = level;
7387
7388                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7389                                         ins.objectid,
7390                                         ins.offset, parent, root_objectid,
7391                                         level, BTRFS_ADD_DELAYED_EXTENT,
7392                                         extent_op, 0);
7393                 BUG_ON(ret); /* -ENOMEM */
7394         }
7395         return buf;
7396 }
7397
7398 struct walk_control {
7399         u64 refs[BTRFS_MAX_LEVEL];
7400         u64 flags[BTRFS_MAX_LEVEL];
7401         struct btrfs_key update_progress;
7402         int stage;
7403         int level;
7404         int shared_level;
7405         int update_ref;
7406         int keep_locks;
7407         int reada_slot;
7408         int reada_count;
7409         int for_reloc;
7410 };
7411
7412 #define DROP_REFERENCE  1
7413 #define UPDATE_BACKREF  2
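/*
 * DROP_REFERENCE is the normal stage when deleting a tree: walk down,
 * dropping one reference per block.  The walk switches to UPDATE_BACKREF
 * when it hits a shared subtree whose blocks must be converted to
 * full-backref mode before their references can be dropped.
 */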
7414
7415 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7416                                      struct btrfs_root *root,
7417                                      struct walk_control *wc,
7418                                      struct btrfs_path *path)
7419 {
7420         u64 bytenr;
7421         u64 generation;
7422         u64 refs;
7423         u64 flags;
7424         u32 nritems;
7425         u32 blocksize;
7426         struct btrfs_key key;
7427         struct extent_buffer *eb;
7428         int ret;
7429         int slot;
7430         int nread = 0;
7431
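        /*
         * tune the readahead window: shrink it while we are still
         * consuming blocks from the previous window, grow it as we
         * sweep forward, capped at one node's worth of pointers.
         */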
7432         if (path->slots[wc->level] < wc->reada_slot) {
7433                 wc->reada_count = wc->reada_count * 2 / 3;
7434                 wc->reada_count = max(wc->reada_count, 2);
7435         } else {
7436                 wc->reada_count = wc->reada_count * 3 / 2;
7437                 wc->reada_count = min_t(int, wc->reada_count,
7438                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7439         }
7440
7441         eb = path->nodes[wc->level];
7442         nritems = btrfs_header_nritems(eb);
7443         blocksize = root->nodesize;
7444
7445         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7446                 if (nread >= wc->reada_count)
7447                         break;
7448
7449                 cond_resched();
7450                 bytenr = btrfs_node_blockptr(eb, slot);
7451                 generation = btrfs_node_ptr_generation(eb, slot);
7452
7453                 if (slot == path->slots[wc->level])
7454                         goto reada;
7455
7456                 if (wc->stage == UPDATE_BACKREF &&
7457                     generation <= root->root_key.offset)
7458                         continue;
7459
7460                 /* We don't lock the tree block, it's OK to be racy here */
7461                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7462                                                wc->level - 1, 1, &refs,
7463                                                &flags);
7464                 /* We don't care about errors in readahead. */
7465                 if (ret < 0)
7466                         continue;
7467                 BUG_ON(refs == 0);
7468
7469                 if (wc->stage == DROP_REFERENCE) {
7470                         if (refs == 1)
7471                                 goto reada;
7472
7473                         if (wc->level == 1 &&
7474                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7475                                 continue;
7476                         if (!wc->update_ref ||
7477                             generation <= root->root_key.offset)
7478                                 continue;
7479                         btrfs_node_key_to_cpu(eb, &key, slot);
7480                         ret = btrfs_comp_cpu_keys(&key,
7481                                                   &wc->update_progress);
7482                         if (ret < 0)
7483                                 continue;
7484                 } else {
7485                         if (wc->level == 1 &&
7486                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7487                                 continue;
7488                 }
7489 reada:
7490                 readahead_tree_block(root, bytenr, blocksize);
7491                 nread++;
7492         }
7493         wc->reada_slot = slot;
7494 }
7495
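/*
 * charge every regular file extent in this leaf to the owning root's
 * qgroup; inline extents and holes occupy no allocated space and are
 * skipped.
 */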
7496 static int account_leaf_items(struct btrfs_trans_handle *trans,
7497                               struct btrfs_root *root,
7498                               struct extent_buffer *eb)
7499 {
7500         int nr = btrfs_header_nritems(eb);
7501         int i, extent_type, ret;
7502         struct btrfs_key key;
7503         struct btrfs_file_extent_item *fi;
7504         u64 bytenr, num_bytes;
7505
7506         for (i = 0; i < nr; i++) {
7507                 btrfs_item_key_to_cpu(eb, &key, i);
7508
7509                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7510                         continue;
7511
7512                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7513                 /* filter out non-qgroup-accountable extents */
7514                 extent_type = btrfs_file_extent_type(eb, fi);
7515
7516                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7517                         continue;
7518
7519                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7520                 if (!bytenr)
7521                         continue;
7522
7523                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7524
7525                 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7526                                               root->objectid,
7527                                               bytenr, num_bytes,
7528                                               BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7529                 if (ret)
7530                         return ret;
7531         }
7532         return 0;
7533 }
7534
7535 /*
7536  * Walk up the tree from the bottom, freeing leaves and any interior
7537  * nodes which have had all slots visited. If a node (leaf or
7538  * interior) is freed, the node above it will have its slot
7539  * incremented. The root node will never be freed.
7540  *
7541  * At the end of this function, we should have a path which has all
7542  * slots incremented to the next position for a search. If we need to
7543  * read a new node it will be NULL and the node above it will have the
7544  * correct slot selected for a later read.
7545  *
7546  * If we increment the root node's slot counter past the number of
7547  * elements, 1 is returned to signal completion of the search.
7548  */
7549 static int adjust_slots_upwards(struct btrfs_root *root,
7550                                 struct btrfs_path *path, int root_level)
7551 {
7552         int level = 0;
7553         int nr, slot;
7554         struct extent_buffer *eb;
7555
7556         if (root_level == 0)
7557                 return 1;
7558
7559         while (level <= root_level) {
7560                 eb = path->nodes[level];
7561                 nr = btrfs_header_nritems(eb);
7562                 path->slots[level]++;
7563                 slot = path->slots[level];
7564                 if (slot >= nr || level == 0) {
7565                         /*
7566                          * Don't free the root - we will detect this
7567                          * condition after our loop and return a
7568                          * positive value for caller to stop walking the tree.
7569                          */
7570                         if (level != root_level) {
7571                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7572                                 path->locks[level] = 0;
7573
7574                                 free_extent_buffer(eb);
7575                                 path->nodes[level] = NULL;
7576                                 path->slots[level] = 0;
7577                         }
7578                 } else {
7579                         /*
7580                          * We have a valid slot to walk back down
7581                          * from. Stop here so caller can process these
7582                          * new nodes.
7583                          */
7584                         break;
7585                 }
7586
7587                 level++;
7588         }
7589
7590         eb = path->nodes[root_level];
7591         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7592                 return 1;
7593
7594         return 0;
7595 }
7596
7597 /*
7598  * root_eb is the subtree root and is locked before this function is called.
7599  */
7600 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7601                                   struct btrfs_root *root,
7602                                   struct extent_buffer *root_eb,
7603                                   u64 root_gen,
7604                                   int root_level)
7605 {
7606         int ret = 0;
7607         int level;
7608         struct extent_buffer *eb = root_eb;
7609         struct btrfs_path *path = NULL;
7610
7611         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7612         BUG_ON(root_eb == NULL);
7613
7614         if (!root->fs_info->quota_enabled)
7615                 return 0;
7616
7617         if (!extent_buffer_uptodate(root_eb)) {
7618                 ret = btrfs_read_buffer(root_eb, root_gen);
7619                 if (ret)
7620                         goto out;
7621         }
7622
7623         if (root_level == 0) {
7624                 ret = account_leaf_items(trans, root, root_eb);
7625                 goto out;
7626         }
7627
7628         path = btrfs_alloc_path();
7629         if (!path)
7630                 return -ENOMEM;
7631
7632         /*
7633          * Walk down the tree.  Missing extent blocks are filled in as
7634          * we go. Metadata is accounted every time we read a new
7635          * extent block.
7636          *
7637          * When we reach a leaf, we account for file extent items in it,
7638          * walk back up the tree (adjusting slot pointers as we go)
7639          * and restart the search process.
7640          */
7641         extent_buffer_get(root_eb); /* For path */
7642         path->nodes[root_level] = root_eb;
7643         path->slots[root_level] = 0;
7644         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7645 walk_down:
7646         level = root_level;
7647         while (level >= 0) {
7648                 if (path->nodes[level] == NULL) {
7649                         int parent_slot;
7650                         u64 child_gen;
7651                         u64 child_bytenr;
7652
7653                         /* We need to get child blockptr/gen from
7654                          * parent before we can read it. */
7655                         eb = path->nodes[level + 1];
7656                         parent_slot = path->slots[level + 1];
7657                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7658                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7659
7660                         eb = read_tree_block(root, child_bytenr, child_gen);
7661                         if (!eb || !extent_buffer_uptodate(eb)) {
                                     free_extent_buffer(eb);
7662                                 ret = -EIO;
7663                                 goto out;
7664                         }
7665
7666                         path->nodes[level] = eb;
7667                         path->slots[level] = 0;
7668
7669                         btrfs_tree_read_lock(eb);
7670                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7671                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7672
7673                         ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7674                                                 root->objectid,
7675                                                 child_bytenr,
7676                                                 root->nodesize,
7677                                                 BTRFS_QGROUP_OPER_SUB_SUBTREE,
7678                                                 0);
7679                         if (ret)
7680                                 goto out;
7681
7682                 }
7683
7684                 if (level == 0) {
7685                         ret = account_leaf_items(trans, root, path->nodes[level]);
7686                         if (ret)
7687                                 goto out;
7688
7689                         /* Nonzero return here means we completed our search */
7690                         ret = adjust_slots_upwards(root, path, root_level);
7691                         if (ret)
7692                                 break;
7693
7694                         /* Restart search with new slots */
7695                         goto walk_down;
7696                 }
7697
7698                 level--;
7699         }
7700
7701         ret = 0;
7702 out:
7703         btrfs_free_path(path);
7704
7705         return ret;
7706 }
7707
7708 /*
7709  * helper to process tree block while walking down the tree.
7710  *
7711  * when wc->stage == UPDATE_BACKREF, this function updates
7712  * back refs for pointers in the block.
7713  *
7714  * NOTE: return value 1 means we should stop walking down.
7715  */
7716 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7717                                    struct btrfs_root *root,
7718                                    struct btrfs_path *path,
7719                                    struct walk_control *wc, int lookup_info)
7720 {
7721         int level = wc->level;
7722         struct extent_buffer *eb = path->nodes[level];
7723         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7724         int ret;
7725
7726         if (wc->stage == UPDATE_BACKREF &&
7727             btrfs_header_owner(eb) != root->root_key.objectid)
7728                 return 1;
7729
7730         /*
7731          * when reference count of tree block is 1, it won't increase
7732          * again. once full backref flag is set, we never clear it.
7733          */
7734         if (lookup_info &&
7735             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7736              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7737                 BUG_ON(!path->locks[level]);
7738                 ret = btrfs_lookup_extent_info(trans, root,
7739                                                eb->start, level, 1,
7740                                                &wc->refs[level],
7741                                                &wc->flags[level]);
7742                 BUG_ON(ret == -ENOMEM);
7743                 if (ret)
7744                         return ret;
7745                 BUG_ON(wc->refs[level] == 0);
7746         }
7747
7748         if (wc->stage == DROP_REFERENCE) {
7749                 if (wc->refs[level] > 1)
7750                         return 1;
7751
7752                 if (path->locks[level] && !wc->keep_locks) {
7753                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7754                         path->locks[level] = 0;
7755                 }
7756                 return 0;
7757         }
7758
7759         /* wc->stage == UPDATE_BACKREF */
7760         if (!(wc->flags[level] & flag)) {
7761                 BUG_ON(!path->locks[level]);
7762                 ret = btrfs_inc_ref(trans, root, eb, 1);
7763                 BUG_ON(ret); /* -ENOMEM */
7764                 ret = btrfs_dec_ref(trans, root, eb, 0);
7765                 BUG_ON(ret); /* -ENOMEM */
7766                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7767                                                   eb->len, flag,
7768                                                   btrfs_header_level(eb), 0);
7769                 BUG_ON(ret); /* -ENOMEM */
7770                 wc->flags[level] |= flag;
7771         }
7772
7773         /*
7774          * the block is shared by multiple trees, so it's not good to
7775          * keep the tree lock
7776          */
7777         if (path->locks[level] && level > 0) {
7778                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7779                 path->locks[level] = 0;
7780         }
7781         return 0;
7782 }
7783
7784 /*
7785  * helper to process tree block pointer.
7786  *
7787  * when wc->stage == DROP_REFERENCE, this function checks the
7788  * reference count of the block pointed to. if the block
7789  * is shared and we need to update back refs for the subtree
7790  * rooted at the block, this function changes wc->stage to
7791  * UPDATE_BACKREF. if the block is shared and there is no
7792  * need to update back refs, this function drops the reference
7793  * to the block.
7794  *
7795  * NOTE: return value 1 means we should stop walking down.
7796  */
7797 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7798                                  struct btrfs_root *root,
7799                                  struct btrfs_path *path,
7800                                  struct walk_control *wc, int *lookup_info)
7801 {
7802         u64 bytenr;
7803         u64 generation;
7804         u64 parent;
7805         u32 blocksize;
7806         struct btrfs_key key;
7807         struct extent_buffer *next;
7808         int level = wc->level;
7809         int reada = 0;
7810         int ret = 0;
7811         bool need_account = false;
7812
7813         generation = btrfs_node_ptr_generation(path->nodes[level],
7814                                                path->slots[level]);
7815         /*
7816          * if the lower level block was created before the snapshot
7817          * was created, we know there is no need to update back refs
7818          * for the subtree
7819          */
7820         if (wc->stage == UPDATE_BACKREF &&
7821             generation <= root->root_key.offset) {
7822                 *lookup_info = 1;
7823                 return 1;
7824         }
7825
7826         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7827         blocksize = root->nodesize;
7828
7829         next = btrfs_find_tree_block(root, bytenr);
7830         if (!next) {
7831                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7832                 if (!next)
7833                         return -ENOMEM;
7834                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7835                                                level - 1);
7836                 reada = 1;
7837         }
7838         btrfs_tree_lock(next);
7839         btrfs_set_lock_blocking(next);
7840
7841         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7842                                        &wc->refs[level - 1],
7843                                        &wc->flags[level - 1]);
7844         if (ret < 0) {
7845                 btrfs_tree_unlock(next);
7846                 return ret;
7847         }
7848
7849         if (unlikely(wc->refs[level - 1] == 0)) {
7850                 btrfs_err(root->fs_info, "Missing references.");
7851                 BUG();
7852         }
7853         *lookup_info = 0;
7854
7855         if (wc->stage == DROP_REFERENCE) {
7856                 if (wc->refs[level - 1] > 1) {
7857                         need_account = true;
7858                         if (level == 1 &&
7859                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7860                                 goto skip;
7861
7862                         if (!wc->update_ref ||
7863                             generation <= root->root_key.offset)
7864                                 goto skip;
7865
7866                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7867                                               path->slots[level]);
7868                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7869                         if (ret < 0)
7870                                 goto skip;
7871
7872                         wc->stage = UPDATE_BACKREF;
7873                         wc->shared_level = level - 1;
7874                 }
7875         } else {
7876                 if (level == 1 &&
7877                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7878                         goto skip;
7879         }
7880
7881         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7882                 btrfs_tree_unlock(next);
7883                 free_extent_buffer(next);
7884                 next = NULL;
7885                 *lookup_info = 1;
7886         }
7887
7888         if (!next) {
7889                 if (reada && level == 1)
7890                         reada_walk_down(trans, root, wc, path);
7891                 next = read_tree_block(root, bytenr, generation);
7892                 if (!next || !extent_buffer_uptodate(next)) {
7893                         free_extent_buffer(next);
7894                         return -EIO;
7895                 }
7896                 btrfs_tree_lock(next);
7897                 btrfs_set_lock_blocking(next);
7898         }
7899
7900         level--;
7901         BUG_ON(level != btrfs_header_level(next));
7902         path->nodes[level] = next;
7903         path->slots[level] = 0;
7904         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7905         wc->level = level;
7906         if (wc->level == 1)
7907                 wc->reada_slot = 0;
7908         return 0;
7909 skip:
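        /*
         * not descending into this block: forget its cached ref info
         * and, in the drop stage, account the shared subtree to qgroups
         * if needed and queue a deferred drop of our reference on it.
         */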
7910         wc->refs[level - 1] = 0;
7911         wc->flags[level - 1] = 0;
7912         if (wc->stage == DROP_REFERENCE) {
7913                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7914                         parent = path->nodes[level]->start;
7915                 } else {
7916                         BUG_ON(root->root_key.objectid !=
7917                                btrfs_header_owner(path->nodes[level]));
7918                         parent = 0;
7919                 }
7920
7921                 if (need_account) {
7922                         ret = account_shared_subtree(trans, root, next,
7923                                                      generation, level - 1);
7924                         if (ret) {
7925                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7926                                         "%d accounting shared subtree. Quota "
7927                                         "is out of sync, rescan required.\n",
7928                                         root->fs_info->sb->s_id, ret);
7929                         }
7930                 }
7931                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7932                                 root->root_key.objectid, level - 1, 0, 0);
7933                 BUG_ON(ret); /* -ENOMEM */
7934         }
7935         btrfs_tree_unlock(next);
7936         free_extent_buffer(next);
7937         *lookup_info = 1;
7938         return 1;
7939 }
7940
7941 /*
7942  * helper to process tree block while walking up the tree.
7943  *
7944  * when wc->stage == DROP_REFERENCE, this function drops
7945  * reference count on the block.
7946  *
7947  * when wc->stage == UPDATE_BACKREF, this function changes
7948  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7949  * to UPDATE_BACKREF previously while processing the block.
7950  *
7951  * NOTE: return value 1 means we should stop walking up.
7952  */
7953 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7954                                  struct btrfs_root *root,
7955                                  struct btrfs_path *path,
7956                                  struct walk_control *wc)
7957 {
7958         int ret;
7959         int level = wc->level;
7960         struct extent_buffer *eb = path->nodes[level];
7961         u64 parent = 0;
7962
7963         if (wc->stage == UPDATE_BACKREF) {
7964                 BUG_ON(wc->shared_level < level);
7965                 if (level < wc->shared_level)
7966                         goto out;
7967
7968                 ret = find_next_key(path, level + 1, &wc->update_progress);
7969                 if (ret > 0)
7970                         wc->update_ref = 0;
7971
7972                 wc->stage = DROP_REFERENCE;
7973                 wc->shared_level = -1;
7974                 path->slots[level] = 0;
7975
7976                 /*
7977                  * check reference count again if the block isn't locked.
7978                  * we should start walking down the tree again if reference
7979                  * count is one.
7980                  */
7981                 if (!path->locks[level]) {
7982                         BUG_ON(level == 0);
7983                         btrfs_tree_lock(eb);
7984                         btrfs_set_lock_blocking(eb);
7985                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7986
7987                         ret = btrfs_lookup_extent_info(trans, root,
7988                                                        eb->start, level, 1,
7989                                                        &wc->refs[level],
7990                                                        &wc->flags[level]);
7991                         if (ret < 0) {
7992                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7993                                 path->locks[level] = 0;
7994                                 return ret;
7995                         }
7996                         BUG_ON(wc->refs[level] == 0);
7997                         if (wc->refs[level] == 1) {
7998                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7999                                 path->locks[level] = 0;
8000                                 return 1;
8001                         }
8002                 }
8003         }
8004
8005         /* wc->stage == DROP_REFERENCE */
8006         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8007
8008         if (wc->refs[level] == 1) {
8009                 if (level == 0) {
8010                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8011                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8012                         else
8013                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8014                         BUG_ON(ret); /* -ENOMEM */
8015                         ret = account_leaf_items(trans, root, eb);
8016                         if (ret) {
8017                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8018                                         "%d accounting leaf items. Quota "
8019                                         "is out of sync, rescan required.\n",
8020                                         root->fs_info->sb->s_id, ret);
8021                         }
8022                 }
8023                 /* make the block-locked assertion in clean_tree_block happy */
8024                 if (!path->locks[level] &&
8025                     btrfs_header_generation(eb) == trans->transid) {
8026                         btrfs_tree_lock(eb);
8027                         btrfs_set_lock_blocking(eb);
8028                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8029                 }
8030                 clean_tree_block(trans, root, eb);
8031         }
8032
8033         if (eb == root->node) {
8034                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8035                         parent = eb->start;
8036                 else
8037                         BUG_ON(root->root_key.objectid !=
8038                                btrfs_header_owner(eb));
8039         } else {
8040                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8041                         parent = path->nodes[level + 1]->start;
8042                 else
8043                         BUG_ON(root->root_key.objectid !=
8044                                btrfs_header_owner(path->nodes[level + 1]));
8045         }
8046
8047         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8048 out:
8049         wc->refs[level] = 0;
8050         wc->flags[level] = 0;
8051         return 0;
8052 }
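
/*
 * A compact sketch of the two-stage walk (illustrative summary; the
 * authoritative logic lives in walk_down_proc()/walk_up_proc()):
 *
 *   DROP_REFERENCE:  walking down, drop the reference count of each
 *                    block; when update_ref is set and a shared block
 *                    owned by this root is found, switch to
 *                    UPDATE_BACKREF and remember wc->shared_level.
 *   UPDATE_BACKREF:  convert backrefs under the shared block to full
 *                    backrefs; once walk_up_proc() climbs back to
 *                    wc->shared_level, it flips the stage back to
 *                    DROP_REFERENCE and the walk resumes from there.
 */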
8053
8054 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8055                                    struct btrfs_root *root,
8056                                    struct btrfs_path *path,
8057                                    struct walk_control *wc)
8058 {
8059         int level = wc->level;
8060         int lookup_info = 1;
8061         int ret;
8062
8063         while (level >= 0) {
8064                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8065                 if (ret > 0)
8066                         break;
8067
8068                 if (level == 0)
8069                         break;
8070
8071                 if (path->slots[level] >=
8072                     btrfs_header_nritems(path->nodes[level]))
8073                         break;
8074
8075                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8076                 if (ret > 0) {
8077                         path->slots[level]++;
8078                         continue;
8079                 } else if (ret < 0)
8080                         return ret;
8081                 level = wc->level;
8082         }
8083         return 0;
8084 }
8085
8086 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8087                                  struct btrfs_root *root,
8088                                  struct btrfs_path *path,
8089                                  struct walk_control *wc, int max_level)
8090 {
8091         int level = wc->level;
8092         int ret;
8093
8094         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8095         while (level < max_level && path->nodes[level]) {
8096                 wc->level = level;
8097                 if (path->slots[level] + 1 <
8098                     btrfs_header_nritems(path->nodes[level])) {
8099                         path->slots[level]++;
8100                         return 0;
8101                 } else {
8102                         ret = walk_up_proc(trans, root, path, wc);
8103                         if (ret > 0)
8104                                 return 0;
8105
8106                         if (path->locks[level]) {
8107                                 btrfs_tree_unlock_rw(path->nodes[level],
8108                                                      path->locks[level]);
8109                                 path->locks[level] = 0;
8110                         }
8111                         free_extent_buffer(path->nodes[level]);
8112                         path->nodes[level] = NULL;
8113                         level++;
8114                 }
8115         }
8116         return 1;
8117 }
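
/*
 * walk_down_tree() and walk_up_tree() are meant to be driven in a
 * loop (see btrfs_drop_snapshot() below): walk down as far as
 * possible, then walk up until a node with unvisited slots is found.
 * walk_up_tree() returning 1 means everything up to max_level has
 * been processed and the loop can stop.
 */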
8118
8119 /*
8120  * drop a subvolume tree.
8121  *
8122  * this function traverses the tree, freeing any blocks that are only
8123  * referenced by the tree.
8124  *
8125  * when a shared tree block is found, this function decreases its
8126  * reference count by one. if update_ref is true, this function
8127  * also makes sure backrefs for the shared block and all lower level
8128  * blocks are properly updated.
8129  *
8130  * If called with for_reloc == 0, may exit early with -EAGAIN
8131  */
8132 int btrfs_drop_snapshot(struct btrfs_root *root,
8133                          struct btrfs_block_rsv *block_rsv, int update_ref,
8134                          int for_reloc)
8135 {
8136         struct btrfs_path *path;
8137         struct btrfs_trans_handle *trans;
8138         struct btrfs_root *tree_root = root->fs_info->tree_root;
8139         struct btrfs_root_item *root_item = &root->root_item;
8140         struct walk_control *wc;
8141         struct btrfs_key key;
8142         int err = 0;
8143         int ret;
8144         int level;
8145         bool root_dropped = false;
8146
8147         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8148
8149         path = btrfs_alloc_path();
8150         if (!path) {
8151                 err = -ENOMEM;
8152                 goto out;
8153         }
8154
8155         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8156         if (!wc) {
8157                 btrfs_free_path(path);
8158                 err = -ENOMEM;
8159                 goto out;
8160         }
8161
8162         trans = btrfs_start_transaction(tree_root, 0);
8163         if (IS_ERR(trans)) {
8164                 err = PTR_ERR(trans);
8165                 goto out_free;
8166         }
8167
8168         if (block_rsv)
8169                 trans->block_rsv = block_rsv;
8170
8171         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8172                 level = btrfs_header_level(root->node);
8173                 path->nodes[level] = btrfs_lock_root_node(root);
8174                 btrfs_set_lock_blocking(path->nodes[level]);
8175                 path->slots[level] = 0;
8176                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8177                 memset(&wc->update_progress, 0,
8178                        sizeof(wc->update_progress));
8179         } else {
8180                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8181                 memcpy(&wc->update_progress, &key,
8182                        sizeof(wc->update_progress));
8183
8184                 level = root_item->drop_level;
8185                 BUG_ON(level == 0);
8186                 path->lowest_level = level;
8187                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8188                 path->lowest_level = 0;
8189                 if (ret < 0) {
8190                         err = ret;
8191                         goto out_end_trans;
8192                 }
8193                 WARN_ON(ret > 0);
8194
8195                 /*
8196                  * unlock our path, this is safe because only this
8197                  * function is allowed to delete this snapshot
8198                  */
8199                 btrfs_unlock_up_safe(path, 0);
8200
8201                 level = btrfs_header_level(root->node);
8202                 while (1) {
8203                         btrfs_tree_lock(path->nodes[level]);
8204                         btrfs_set_lock_blocking(path->nodes[level]);
8205                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8206
8207                         ret = btrfs_lookup_extent_info(trans, root,
8208                                                 path->nodes[level]->start,
8209                                                 level, 1, &wc->refs[level],
8210                                                 &wc->flags[level]);
8211                         if (ret < 0) {
8212                                 err = ret;
8213                                 goto out_end_trans;
8214                         }
8215                         BUG_ON(wc->refs[level] == 0);
8216
8217                         if (level == root_item->drop_level)
8218                                 break;
8219
8220                         btrfs_tree_unlock(path->nodes[level]);
8221                         path->locks[level] = 0;
8222                         WARN_ON(wc->refs[level] != 1);
8223                         level--;
8224                 }
8225         }
8226
8227         wc->level = level;
8228         wc->shared_level = -1;
8229         wc->stage = DROP_REFERENCE;
8230         wc->update_ref = update_ref;
8231         wc->keep_locks = 0;
8232         wc->for_reloc = for_reloc;
8233         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8234
8235         while (1) {
8237                 ret = walk_down_tree(trans, root, path, wc);
8238                 if (ret < 0) {
8239                         err = ret;
8240                         break;
8241                 }
8242
8243                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8244                 if (ret < 0) {
8245                         err = ret;
8246                         break;
8247                 }
8248
8249                 if (ret > 0) {
8250                         BUG_ON(wc->stage != DROP_REFERENCE);
8251                         break;
8252                 }
8253
8254                 if (wc->stage == DROP_REFERENCE) {
8255                         level = wc->level;
8256                         btrfs_node_key(path->nodes[level],
8257                                        &root_item->drop_progress,
8258                                        path->slots[level]);
8259                         root_item->drop_level = level;
8260                 }
8261
8262                 BUG_ON(wc->level == 0);
8263                 if (btrfs_should_end_transaction(trans, tree_root) ||
8264                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8265                         ret = btrfs_update_root(trans, tree_root,
8266                                                 &root->root_key,
8267                                                 root_item);
8268                         if (ret) {
8269                                 btrfs_abort_transaction(trans, tree_root, ret);
8270                                 err = ret;
8271                                 goto out_end_trans;
8272                         }
8273
8274                         /*
8275                          * Qgroup update accounting is run from
8276                          * delayed ref handling. This usually works
8277                          * out because delayed refs are normally the
8278                          * only way qgroup updates are added. However,
8279                          * we may have added updates during our tree
8280                          * walk so run qgroups here to make sure we
8281                          * don't lose any updates.
8282                          */
8283                         ret = btrfs_delayed_qgroup_accounting(trans,
8284                                                               root->fs_info);
8285                         if (ret)
8286                                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8287                                                    "running qgroup updates "
8288                                                    "during snapshot delete. "
8289                                                    "Quota is out of sync, "
8290                                                    "rescan required.\n", ret);
8291
8292                         btrfs_end_transaction_throttle(trans, tree_root);
8293                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8294                                 pr_debug("BTRFS: drop snapshot early exit\n");
8295                                 err = -EAGAIN;
8296                                 goto out_free;
8297                         }
8298
8299                         trans = btrfs_start_transaction(tree_root, 0);
8300                         if (IS_ERR(trans)) {
8301                                 err = PTR_ERR(trans);
8302                                 goto out_free;
8303                         }
8304                         if (block_rsv)
8305                                 trans->block_rsv = block_rsv;
8306                 }
8307         }
8308         btrfs_release_path(path);
8309         if (err)
8310                 goto out_end_trans;
8311
8312         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8313         if (ret) {
8314                 btrfs_abort_transaction(trans, tree_root, ret);
8315                 goto out_end_trans;
8316         }
8317
8318         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8319                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8320                                       NULL, NULL);
8321                 if (ret < 0) {
8322                         btrfs_abort_transaction(trans, tree_root, ret);
8323                         err = ret;
8324                         goto out_end_trans;
8325                 } else if (ret > 0) {
8326                         /* if we fail to delete the orphan item this time
8327                          * around, it'll get picked up the next time.
8328                          *
8329                          * The most common failure here is just -ENOENT.
8330                          */
8331                         btrfs_del_orphan_item(trans, tree_root,
8332                                               root->root_key.objectid);
8333                 }
8334         }
8335
8336         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8337                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8338         } else {
8339                 free_extent_buffer(root->node);
8340                 free_extent_buffer(root->commit_root);
8341                 btrfs_put_fs_root(root);
8342         }
8343         root_dropped = true;
8344 out_end_trans:
8345         ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8346         if (ret)
8347                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8348                                    "running qgroup updates "
8349                                    "during snapshot delete. "
8350                                    "Quota is out of sync, "
8351                                    "rescan required.\n", ret);
8352
8353         btrfs_end_transaction_throttle(trans, tree_root);
8354 out_free:
8355         kfree(wc);
8356         btrfs_free_path(path);
8357 out:
8358         /*
8359          * So if we need to stop dropping the snapshot for whatever reason,
8360          * we need to make sure to add it back to the dead root list so
8361          * that we keep trying to do the work later.  This also cleans up
8362          * roots that aren't in the radix (like when we recover after a
8363          * power failure or unmount) so we don't leak memory.
8364          */
8365         if (!for_reloc && root_dropped == false)
8366                 btrfs_add_dead_root(root);
8367         if (err && err != -EAGAIN)
8368                 btrfs_std_error(root->fs_info, err);
8369         return err;
8370 }
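
/*
 * The deletion above is restartable: drop_progress/drop_level in the
 * root item record how far we got, so an interrupted drop (transaction
 * end, cleaner sleep or a crash) resumes from that key on the next
 * attempt. A minimal sketch of the caller side, assuming the usual
 * cleaner-thread path (values illustrative only):
 *
 *	ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	if (ret == -EAGAIN)
 *		return;	(the root was re-added to the dead-root list above)
 */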
8371
8372 /*
8373  * drop the subtree rooted at tree block 'node'.
8374  *
8375  * NOTE: this function will unlock and release tree block 'node'.
8376  * only used by relocation code.
8377  */
8378 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8379                         struct btrfs_root *root,
8380                         struct extent_buffer *node,
8381                         struct extent_buffer *parent)
8382 {
8383         struct btrfs_path *path;
8384         struct walk_control *wc;
8385         int level;
8386         int parent_level;
8387         int ret = 0;
8388         int wret;
8389
8390         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8391
8392         path = btrfs_alloc_path();
8393         if (!path)
8394                 return -ENOMEM;
8395
8396         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8397         if (!wc) {
8398                 btrfs_free_path(path);
8399                 return -ENOMEM;
8400         }
8401
8402         btrfs_assert_tree_locked(parent);
8403         parent_level = btrfs_header_level(parent);
8404         extent_buffer_get(parent);
8405         path->nodes[parent_level] = parent;
8406         path->slots[parent_level] = btrfs_header_nritems(parent);
8407
8408         btrfs_assert_tree_locked(node);
8409         level = btrfs_header_level(node);
8410         path->nodes[level] = node;
8411         path->slots[level] = 0;
8412         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8413
8414         wc->refs[parent_level] = 1;
8415         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8416         wc->level = level;
8417         wc->shared_level = -1;
8418         wc->stage = DROP_REFERENCE;
8419         wc->update_ref = 0;
8420         wc->keep_locks = 1;
8421         wc->for_reloc = 1;
8422         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8423
8424         while (1) {
8425                 wret = walk_down_tree(trans, root, path, wc);
8426                 if (wret < 0) {
8427                         ret = wret;
8428                         break;
8429                 }
8430
8431                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8432                 if (wret < 0)
8433                         ret = wret;
8434                 if (wret != 0)
8435                         break;
8436         }
8437
8438         kfree(wc);
8439         btrfs_free_path(path);
8440         return ret;
8441 }
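
/*
 * Compared with btrfs_drop_snapshot(), the subtree variant above walks
 * from an arbitrary node rather than the root: the parent is seeded
 * with refs == 1 and FULL_BACKREF so walk_up_tree() stops at
 * parent_level, and keep_locks/for_reloc are set because relocation
 * is still using the tree.
 */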
8442
8443 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8444 {
8445         u64 num_devices;
8446         u64 stripped;
8447
8448         /*
8449          * if restripe is on for this chunk type, pick the target profile
8450          * and return; otherwise do the usual balance
8451          */
8452         stripped = get_restripe_target(root->fs_info, flags);
8453         if (stripped)
8454                 return extended_to_chunk(stripped);
8455
8456         num_devices = root->fs_info->fs_devices->rw_devices;
8457
8458         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8459                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8460                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8461
8462         if (num_devices == 1) {
8463                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8464                 stripped = flags & ~stripped;
8465
8466                 /* turn raid0 into single device chunks */
8467                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8468                         return stripped;
8469
8470                 /* turn mirroring into duplication */
8471                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8472                              BTRFS_BLOCK_GROUP_RAID10))
8473                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8474         } else {
8475                 /* they already had raid on here, just return */
8476                 if (flags & stripped)
8477                         return flags;
8478
8479                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8480                 stripped = flags & ~stripped;
8481
8482                 /* switch duplicated blocks with raid1 */
8483                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8484                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8485
8486                 /* this is drive concat, leave it alone */
8487         }
8488
8489         return flags;
8490 }
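
/*
 * Worked example for update_block_group_flags(), assuming no restripe
 * target is set: with a single rw device a RAID1 group can no longer
 * keep copies on two devices, so RAID1/RAID10 degrade to DUP and
 * RAID0 degrades to single. With more than one device the conversion
 * runs the other way and DUP is upgraded to RAID1; profiles that are
 * already striped/mirrored are returned unchanged.
 */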
8491
8492 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8493 {
8494         struct btrfs_space_info *sinfo = cache->space_info;
8495         u64 num_bytes;
8496         u64 min_allocable_bytes;
8497         int ret = -ENOSPC;
8498
8500         /*
8501          * We need some metadata space and system metadata space for
8502          * allocating chunks in some corner cases, unless we're forced
8503          * to set the block group read-only.
8504          */
8505         if ((sinfo->flags &
8506              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8507             !force)
8508                 min_allocable_bytes = 1 * 1024 * 1024;
8509         else
8510                 min_allocable_bytes = 0;
8511
8512         spin_lock(&sinfo->lock);
8513         spin_lock(&cache->lock);
8514
8515         if (cache->ro) {
8516                 ret = 0;
8517                 goto out;
8518         }
8519
8520         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8521                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8522
8523         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8524             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8525             min_allocable_bytes <= sinfo->total_bytes) {
8526                 sinfo->bytes_readonly += num_bytes;
8527                 cache->ro = 1;
8528                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8529                 ret = 0;
8530         }
8531 out:
8532         spin_unlock(&cache->lock);
8533         spin_unlock(&sinfo->lock);
8534         return ret;
8535 }
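
/*
 * Illustration of the check above with made-up numbers: in a 1024MiB
 * space_info with 600MiB used, 200MiB reserved, 100MiB pinned and
 * nothing in bytes_may_use/bytes_readonly, a block group with 64MiB of
 * unused space can go read-only, since 600 + 200 + 100 + 64 + 1MiB
 * (min_allocable_bytes) <= 1024. If the sum exceeded total_bytes we
 * would return -ENOSPC instead (force only drops min_allocable_bytes
 * to zero, it does not skip the check).
 */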
8536
8537 int btrfs_set_block_group_ro(struct btrfs_root *root,
8538                              struct btrfs_block_group_cache *cache)
8540 {
8541         struct btrfs_trans_handle *trans;
8542         u64 alloc_flags;
8543         int ret;
8544
8545         BUG_ON(cache->ro);
8546
8547         trans = btrfs_join_transaction(root);
8548         if (IS_ERR(trans))
8549                 return PTR_ERR(trans);
8550
8551         alloc_flags = update_block_group_flags(root, cache->flags);
8552         if (alloc_flags != cache->flags) {
8553                 ret = do_chunk_alloc(trans, root, alloc_flags,
8554                                      CHUNK_ALLOC_FORCE);
8555                 if (ret < 0)
8556                         goto out;
8557         }
8558
8559         ret = set_block_group_ro(cache, 0);
8560         if (!ret)
8561                 goto out;
8562         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8563         ret = do_chunk_alloc(trans, root, alloc_flags,
8564                              CHUNK_ALLOC_FORCE);
8565         if (ret < 0)
8566                 goto out;
8567         ret = set_block_group_ro(cache, 0);
8568 out:
8569         btrfs_end_transaction(trans, root);
8570         return ret;
8571 }
8572
8573 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8574                             struct btrfs_root *root, u64 type)
8575 {
8576         u64 alloc_flags = get_alloc_profile(root, type);
8577         return do_chunk_alloc(trans, root, alloc_flags,
8578                               CHUNK_ALLOC_FORCE);
8579 }
8580
8581 /*
8582  * helper to account the unused space of all the readonly block groups in
8583  * the space_info. takes mirrors into account.
8584  */
8585 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8586 {
8587         struct btrfs_block_group_cache *block_group;
8588         u64 free_bytes = 0;
8589         int factor;
8590
8591         /* It's df, we don't care if it's racy */
8592         if (list_empty(&sinfo->ro_bgs))
8593                 return 0;
8594
8595         spin_lock(&sinfo->lock);
8596         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8597                 spin_lock(&block_group->lock);
8598
8599                 if (!block_group->ro) {
8600                         spin_unlock(&block_group->lock);
8601                         continue;
8602                 }
8603
8604                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8605                                           BTRFS_BLOCK_GROUP_RAID10 |
8606                                           BTRFS_BLOCK_GROUP_DUP))
8607                         factor = 2;
8608                 else
8609                         factor = 1;
8610
8611                 free_bytes += (block_group->key.offset -
8612                                btrfs_block_group_used(&block_group->item)) *
8613                                factor;
8614
8615                 spin_unlock(&block_group->lock);
8616         }
8617         spin_unlock(&sinfo->lock);
8618
8619         return free_bytes;
8620 }
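
/*
 * The factor of 2 above reflects mirroring: RAID1/RAID10/DUP block
 * groups store every byte twice, so each unused byte in the group
 * corresponds to two raw bytes on disk. A minimal sketch of that
 * computation as a stand-alone helper (hypothetical, not used
 * anywhere in this file):
 */
static inline u64 ro_bg_free_raw_bytes(struct btrfs_block_group_cache *bg)
{
	/* mirrored profiles consume two raw bytes per logical byte */
	int factor = (bg->flags & (BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP)) ? 2 : 1;

	return (bg->key.offset - btrfs_block_group_used(&bg->item)) * factor;
}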
8621
8622 void btrfs_set_block_group_rw(struct btrfs_root *root,
8623                               struct btrfs_block_group_cache *cache)
8624 {
8625         struct btrfs_space_info *sinfo = cache->space_info;
8626         u64 num_bytes;
8627
8628         BUG_ON(!cache->ro);
8629
8630         spin_lock(&sinfo->lock);
8631         spin_lock(&cache->lock);
8632         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8633                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8634         sinfo->bytes_readonly -= num_bytes;
8635         cache->ro = 0;
8636         list_del_init(&cache->ro_list);
8637         spin_unlock(&cache->lock);
8638         spin_unlock(&sinfo->lock);
8639 }
8640
8641 /*
8642  * checks to see if it's even possible to relocate this block group.
8643  *
8644  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8645  * it's ok to go ahead and try.
8646  */
8647 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8648 {
8649         struct btrfs_block_group_cache *block_group;
8650         struct btrfs_space_info *space_info;
8651         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8652         struct btrfs_device *device;
8653         struct btrfs_trans_handle *trans;
8654         u64 min_free;
8655         u64 dev_min = 1;
8656         u64 dev_nr = 0;
8657         u64 target;
8658         int index;
8659         int full = 0;
8660         int ret = 0;
8661
8662         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8663
8664         /* odd, couldn't find the block group, leave it alone */
8665         if (!block_group)
8666                 return -1;
8667
8668         min_free = btrfs_block_group_used(&block_group->item);
8669
8670         /* no bytes used, we're good */
8671         if (!min_free)
8672                 goto out;
8673
8674         space_info = block_group->space_info;
8675         spin_lock(&space_info->lock);
8676
8677         full = space_info->full;
8678
8679         /*
8680          * if this is the last block group we have in this space, we can't
8681          * relocate it unless we're able to allocate a new chunk below.
8682          *
8683          * Otherwise, we need to make sure we have room in the space to handle
8684          * all of the extents from this block group.  If we can, we're good.
8685          */
8686         if ((space_info->total_bytes != block_group->key.offset) &&
8687             (space_info->bytes_used + space_info->bytes_reserved +
8688              space_info->bytes_pinned + space_info->bytes_readonly +
8689              min_free < space_info->total_bytes)) {
8690                 spin_unlock(&space_info->lock);
8691                 goto out;
8692         }
8693         spin_unlock(&space_info->lock);
8694
8695         /*
8696          * ok, we don't have enough space, but maybe we have free space on
8697          * our devices to allocate new chunks for relocation, so loop through
8698          * our alloc devices and guess if we have enough space.  if this
8699          * block group is going to be restriped, run checks against the
8700          * target profile instead of the current one.
8701          */
8702         ret = -1;
8703
8704         /*
8705          * index:
8706          *      0: raid10
8707          *      1: raid1
8708          *      2: dup
8709          *      3: raid0
8710          *      4: single
8711          */
8712         target = get_restripe_target(root->fs_info, block_group->flags);
8713         if (target) {
8714                 index = __get_raid_index(extended_to_chunk(target));
8715         } else {
8716                 /*
8717                  * this is just a balance, so if we were marked as full
8718                  * we know there is no space for a new chunk
8719                  */
8720                 if (full)
8721                         goto out;
8722
8723                 index = get_block_group_index(block_group);
8724         }
8725
8726         if (index == BTRFS_RAID_RAID10) {
8727                 dev_min = 4;
8728                 /* Divide by 2 */
8729                 min_free >>= 1;
8730         } else if (index == BTRFS_RAID_RAID1) {
8731                 dev_min = 2;
8732         } else if (index == BTRFS_RAID_DUP) {
8733                 /* Multiply by 2 */
8734                 min_free <<= 1;
8735         } else if (index == BTRFS_RAID_RAID0) {
8736                 dev_min = fs_devices->rw_devices;
8737                 do_div(min_free, dev_min);
8738         }
8739
8740         /* We need to do this so that we can look at pending chunks */
8741         trans = btrfs_join_transaction(root);
8742         if (IS_ERR(trans)) {
8743                 ret = PTR_ERR(trans);
8744                 goto out;
8745         }
8746
8747         mutex_lock(&root->fs_info->chunk_mutex);
8748         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8749                 u64 dev_offset;
8750
8751                 /*
8752                  * check to make sure we can actually find a chunk with enough
8753                  * space to fit our block group in.
8754                  */
8755                 if (device->total_bytes > device->bytes_used + min_free &&
8756                     !device->is_tgtdev_for_dev_replace) {
8757                         ret = find_free_dev_extent(trans, device, min_free,
8758                                                    &dev_offset, NULL);
8759                         if (!ret)
8760                                 dev_nr++;
8761
8762                         if (dev_nr >= dev_min)
8763                                 break;
8764
8765                         ret = -1;
8766                 }
8767         }
8768         mutex_unlock(&root->fs_info->chunk_mutex);
8769         btrfs_end_transaction(trans, root);
8770 out:
8771         btrfs_put_block_group(block_group);
8772         return ret;
8773 }
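
/*
 * Example of the per-profile adjustment above (illustrative numbers):
 * relocating a RAID10 group with 1GiB used needs four devices that can
 * each take half of it (dev_min = 4, min_free >>= 1); RAID1 needs two
 * devices holding a full copy each; for DUP both copies land on one
 * device, so a single device must fit twice the used bytes
 * (min_free <<= 1); RAID0 spreads min_free evenly over all rw devices.
 */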
8774
8775 static int find_first_block_group(struct btrfs_root *root,
8776                 struct btrfs_path *path, struct btrfs_key *key)
8777 {
8778         int ret = 0;
8779         struct btrfs_key found_key;
8780         struct extent_buffer *leaf;
8781         int slot;
8782
8783         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8784         if (ret < 0)
8785                 goto out;
8786
8787         while (1) {
8788                 slot = path->slots[0];
8789                 leaf = path->nodes[0];
8790                 if (slot >= btrfs_header_nritems(leaf)) {
8791                         ret = btrfs_next_leaf(root, path);
8792                         if (ret == 0)
8793                                 continue;
8794                         if (ret < 0)
8795                                 goto out;
8796                         break;
8797                 }
8798                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8799
8800                 if (found_key.objectid >= key->objectid &&
8801                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8802                         ret = 0;
8803                         goto out;
8804                 }
8805                 path->slots[0]++;
8806         }
8807 out:
8808         return ret;
8809 }
8810
8811 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8812 {
8813         struct btrfs_block_group_cache *block_group;
8814         u64 last = 0;
8815
8816         while (1) {
8817                 struct inode *inode;
8818
8819                 block_group = btrfs_lookup_first_block_group(info, last);
8820                 while (block_group) {
8821                         spin_lock(&block_group->lock);
8822                         if (block_group->iref)
8823                                 break;
8824                         spin_unlock(&block_group->lock);
8825                         block_group = next_block_group(info->tree_root,
8826                                                        block_group);
8827                 }
8828                 if (!block_group) {
8829                         if (last == 0)
8830                                 break;
8831                         last = 0;
8832                         continue;
8833                 }
8834
8835                 inode = block_group->inode;
8836                 block_group->iref = 0;
8837                 block_group->inode = NULL;
8838                 spin_unlock(&block_group->lock);
8839                 iput(inode);
8840                 last = block_group->key.objectid + block_group->key.offset;
8841                 btrfs_put_block_group(block_group);
8842         }
8843 }
8844
8845 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8846 {
8847         struct btrfs_block_group_cache *block_group;
8848         struct btrfs_space_info *space_info;
8849         struct btrfs_caching_control *caching_ctl;
8850         struct rb_node *n;
8851
8852         down_write(&info->commit_root_sem);
8853         while (!list_empty(&info->caching_block_groups)) {
8854                 caching_ctl = list_entry(info->caching_block_groups.next,
8855                                          struct btrfs_caching_control, list);
8856                 list_del(&caching_ctl->list);
8857                 put_caching_control(caching_ctl);
8858         }
8859         up_write(&info->commit_root_sem);
8860
8861         spin_lock(&info->unused_bgs_lock);
8862         while (!list_empty(&info->unused_bgs)) {
8863                 block_group = list_first_entry(&info->unused_bgs,
8864                                                struct btrfs_block_group_cache,
8865                                                bg_list);
8866                 list_del_init(&block_group->bg_list);
8867                 btrfs_put_block_group(block_group);
8868         }
8869         spin_unlock(&info->unused_bgs_lock);
8870
8871         spin_lock(&info->block_group_cache_lock);
8872         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8873                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8874                                        cache_node);
8875                 rb_erase(&block_group->cache_node,
8876                          &info->block_group_cache_tree);
8877                 spin_unlock(&info->block_group_cache_lock);
8878
8879                 down_write(&block_group->space_info->groups_sem);
8880                 list_del(&block_group->list);
8881                 up_write(&block_group->space_info->groups_sem);
8882
8883                 if (block_group->cached == BTRFS_CACHE_STARTED)
8884                         wait_block_group_cache_done(block_group);
8885
8886                 /*
8887                  * We haven't cached this block group, which means we could
8888                  * possibly have excluded extents on this block group.
8889                  */
8890                 if (block_group->cached == BTRFS_CACHE_NO ||
8891                     block_group->cached == BTRFS_CACHE_ERROR)
8892                         free_excluded_extents(info->extent_root, block_group);
8893
8894                 btrfs_remove_free_space_cache(block_group);
8895                 btrfs_put_block_group(block_group);
8896
8897                 spin_lock(&info->block_group_cache_lock);
8898         }
8899         spin_unlock(&info->block_group_cache_lock);
8900
8901         /* now that all the block groups are freed, go through and
8902          * free all the space_info structs.  This is only called during
8903          * the final stages of unmount, and so we know nobody is
8904          * using them.  We call synchronize_rcu() once before we start,
8905          * just to be on the safe side.
8906          */
8907         synchronize_rcu();
8908
8909         release_global_block_rsv(info);
8910
8911         while (!list_empty(&info->space_info)) {
8912                 int i;
8913
8914                 space_info = list_entry(info->space_info.next,
8915                                         struct btrfs_space_info,
8916                                         list);
8917                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8918                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8919                             space_info->bytes_reserved > 0 ||
8920                             space_info->bytes_may_use > 0)) {
8921                                 dump_space_info(space_info, 0, 0);
8922                         }
8923                 }
8924                 list_del(&space_info->list);
8925                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8926                         struct kobject *kobj;
8927                         kobj = space_info->block_group_kobjs[i];
8928                         space_info->block_group_kobjs[i] = NULL;
8929                         if (kobj) {
8930                                 kobject_del(kobj);
8931                                 kobject_put(kobj);
8932                         }
8933                 }
8934                 kobject_del(&space_info->kobj);
8935                 kobject_put(&space_info->kobj);
8936         }
8937         return 0;
8938 }
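
/*
 * Teardown order above matters: caching controls are dropped first,
 * then the unused_bgs list, then every block group is unlinked from
 * both the rbtree and its space_info before the space_info structs
 * themselves (and their sysfs kobjects) are released.
 */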
8939
8940 static void __link_block_group(struct btrfs_space_info *space_info,
8941                                struct btrfs_block_group_cache *cache)
8942 {
8943         int index = get_block_group_index(cache);
8944         bool first = false;
8945
8946         down_write(&space_info->groups_sem);
8947         if (list_empty(&space_info->block_groups[index]))
8948                 first = true;
8949         list_add_tail(&cache->list, &space_info->block_groups[index]);
8950         up_write(&space_info->groups_sem);
8951
8952         if (first) {
8953                 struct raid_kobject *rkobj;
8954                 int ret;
8955
8956                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8957                 if (!rkobj)
8958                         goto out_err;
8959                 rkobj->raid_type = index;
8960                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8961                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8962                                   "%s", get_raid_name(index));
8963                 if (ret) {
8964                         kobject_put(&rkobj->kobj);
8965                         goto out_err;
8966                 }
8967                 space_info->block_group_kobjs[index] = &rkobj->kobj;
8968         }
8969
8970         return;
8971 out_err:
8972         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8973 }
8974
8975 static struct btrfs_block_group_cache *
8976 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8977 {
8978         struct btrfs_block_group_cache *cache;
8979
8980         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8981         if (!cache)
8982                 return NULL;
8983
8984         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8985                                         GFP_NOFS);
8986         if (!cache->free_space_ctl) {
8987                 kfree(cache);
8988                 return NULL;
8989         }
8990
8991         cache->key.objectid = start;
8992         cache->key.offset = size;
8993         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8994
8995         cache->sectorsize = root->sectorsize;
8996         cache->fs_info = root->fs_info;
8997         cache->full_stripe_len = btrfs_full_stripe_len(root,
8998                                                &root->fs_info->mapping_tree,
8999                                                start);
9000         atomic_set(&cache->count, 1);
9001         spin_lock_init(&cache->lock);
9002         init_rwsem(&cache->data_rwsem);
9003         INIT_LIST_HEAD(&cache->list);
9004         INIT_LIST_HEAD(&cache->cluster_list);
9005         INIT_LIST_HEAD(&cache->bg_list);
9006         INIT_LIST_HEAD(&cache->ro_list);
9007         btrfs_init_free_space_ctl(cache);
9008         atomic_set(&cache->trimming, 0);
9009
9010         return cache;
9011 }
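
/*
 * The cache returned above starts with a reference count of 1; a
 * caller that fails to link it into the fs drops that reference with
 * btrfs_put_block_group(), which frees the cache (including its
 * free_space_ctl) once the count hits zero.
 */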
9012
9013 int btrfs_read_block_groups(struct btrfs_root *root)
9014 {
9015         struct btrfs_path *path;
9016         int ret;
9017         struct btrfs_block_group_cache *cache;
9018         struct btrfs_fs_info *info = root->fs_info;
9019         struct btrfs_space_info *space_info;
9020         struct btrfs_key key;
9021         struct btrfs_key found_key;
9022         struct extent_buffer *leaf;
9023         int need_clear = 0;
9024         u64 cache_gen;
9025
9026         root = info->extent_root;
9027         key.objectid = 0;
9028         key.offset = 0;
9029         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9030         path = btrfs_alloc_path();
9031         if (!path)
9032                 return -ENOMEM;
9033         path->reada = 1;
9034
9035         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9036         if (btrfs_test_opt(root, SPACE_CACHE) &&
9037             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9038                 need_clear = 1;
9039         if (btrfs_test_opt(root, CLEAR_CACHE))
9040                 need_clear = 1;
9041
9042         while (1) {
9043                 ret = find_first_block_group(root, path, &key);
9044                 if (ret > 0)
9045                         break;
9046                 if (ret != 0)
9047                         goto error;
9048
9049                 leaf = path->nodes[0];
9050                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9051
9052                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9053                                                        found_key.offset);
9054                 if (!cache) {
9055                         ret = -ENOMEM;
9056                         goto error;
9057                 }
9058
9059                 if (need_clear) {
9060                         /*
9061                          * When we mount with an old space cache, we need to
9062                          * set BTRFS_DC_CLEAR and set the dirty flag.
9063                          *
9064                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9065                          *    truncate the old free space cache inode and
9066                          *    setup a new one.
9067                          * b) Setting 'dirty flag' makes sure that we flush
9068                          *    the new space cache info onto disk.
9069                          */
9070                         cache->disk_cache_state = BTRFS_DC_CLEAR;
9071                         if (btrfs_test_opt(root, SPACE_CACHE))
9072                                 cache->dirty = 1;
9073                 }
9074
9075                 read_extent_buffer(leaf, &cache->item,
9076                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9077                                    sizeof(cache->item));
9078                 cache->flags = btrfs_block_group_flags(&cache->item);
9079
9080                 key.objectid = found_key.objectid + found_key.offset;
9081                 btrfs_release_path(path);
9082
9083                 /*
9084                  * We need to exclude the super stripes now so that the space
9085                  * info has super bytes accounted for, otherwise we'll think
9086                  * we have more space than we actually do.
9087                  */
9088                 ret = exclude_super_stripes(root, cache);
9089                 if (ret) {
9090                         /*
9091                          * We may have excluded something, so call this just in
9092                          * case.
9093                          */
9094                         free_excluded_extents(root, cache);
9095                         btrfs_put_block_group(cache);
9096                         goto error;
9097                 }
9098
9099                 /*
9100                  * check for two cases: either we are full, and therefore
9101                  * don't need to bother with the caching work since we won't
9102                  * find any space, or we are empty, and we can just add all
9103                  * the space in and be done with it.  This saves us a lot of
9104                  * time, particularly in the full case.
9105                  */
9106                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9107                         cache->last_byte_to_unpin = (u64)-1;
9108                         cache->cached = BTRFS_CACHE_FINISHED;
9109                         free_excluded_extents(root, cache);
9110                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9111                         cache->last_byte_to_unpin = (u64)-1;
9112                         cache->cached = BTRFS_CACHE_FINISHED;
9113                         add_new_free_space(cache, root->fs_info,
9114                                            found_key.objectid,
9115                                            found_key.objectid +
9116                                            found_key.offset);
9117                         free_excluded_extents(root, cache);
9118                 }
9119
9120                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9121                 if (ret) {
9122                         btrfs_remove_free_space_cache(cache);
9123                         btrfs_put_block_group(cache);
9124                         goto error;
9125                 }
9126
9127                 ret = update_space_info(info, cache->flags, found_key.offset,
9128                                         btrfs_block_group_used(&cache->item),
9129                                         &space_info);
9130                 if (ret) {
9131                         btrfs_remove_free_space_cache(cache);
9132                         spin_lock(&info->block_group_cache_lock);
9133                         rb_erase(&cache->cache_node,
9134                                  &info->block_group_cache_tree);
9135                         spin_unlock(&info->block_group_cache_lock);
9136                         btrfs_put_block_group(cache);
9137                         goto error;
9138                 }
9139
9140                 cache->space_info = space_info;
9141                 spin_lock(&cache->space_info->lock);
9142                 cache->space_info->bytes_readonly += cache->bytes_super;
9143                 spin_unlock(&cache->space_info->lock);
9144
9145                 __link_block_group(space_info, cache);
9146
9147                 set_avail_alloc_bits(root->fs_info, cache->flags);
9148                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9149                         set_block_group_ro(cache, 1);
9150                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9151                         spin_lock(&info->unused_bgs_lock);
9152                         /* Should always be true but just in case. */
9153                         if (list_empty(&cache->bg_list)) {
9154                                 btrfs_get_block_group(cache);
9155                                 list_add_tail(&cache->bg_list,
9156                                               &info->unused_bgs);
9157                         }
9158                         spin_unlock(&info->unused_bgs_lock);
9159                 }
9160         }
9161
9162         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9163                 if (!(get_alloc_profile(root, space_info->flags) &
9164                       (BTRFS_BLOCK_GROUP_RAID10 |
9165                        BTRFS_BLOCK_GROUP_RAID1 |
9166                        BTRFS_BLOCK_GROUP_RAID5 |
9167                        BTRFS_BLOCK_GROUP_RAID6 |
9168                        BTRFS_BLOCK_GROUP_DUP)))
9169                         continue;
9170                 /*
9171                  * avoid allocating from un-mirrored block groups if there are
9172                  * mirrored block groups.
9173                  */
9174                 list_for_each_entry(cache,
9175                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9176                                 list)
9177                         set_block_group_ro(cache, 1);
9178                 list_for_each_entry(cache,
9179                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9180                                 list)
9181                         set_block_group_ro(cache, 1);
9182         }
9183
9184         init_global_block_rsv(info);
9185         ret = 0;
9186 error:
9187         btrfs_free_path(path);
9188         return ret;
9189 }
9190
9191 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9192                                        struct btrfs_root *root)
9193 {
9194         struct btrfs_block_group_cache *block_group, *tmp;
9195         struct btrfs_root *extent_root = root->fs_info->extent_root;
9196         struct btrfs_block_group_item item;
9197         struct btrfs_key key;
9198         int ret = 0;
9199
9200         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9201                 list_del_init(&block_group->bg_list);
9202                 if (ret)
9203                         continue;
9204
9205                 spin_lock(&block_group->lock);
9206                 memcpy(&item, &block_group->item, sizeof(item));
9207                 memcpy(&key, &block_group->key, sizeof(key));
9208                 spin_unlock(&block_group->lock);
9209
9210                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9211                                         sizeof(item));
9212                 if (ret)
9213                         btrfs_abort_transaction(trans, extent_root, ret);
9214                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9215                                                key.objectid, key.offset);
9216                 if (ret)
9217                         btrfs_abort_transaction(trans, extent_root, ret);
9218         }
9219 }
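
/*
 * Note the split between this function and btrfs_make_block_group()
 * below: new block groups are only queued on trans->new_bgs at chunk
 * allocation time, and their items are inserted here later in the
 * transaction, so chunk allocation itself does not have to modify the
 * extent tree.
 */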
9220
9221 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9222                            struct btrfs_root *root, u64 bytes_used,
9223                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9224                            u64 size)
9225 {
9226         int ret;
9227         struct btrfs_root *extent_root;
9228         struct btrfs_block_group_cache *cache;
9229
9230         extent_root = root->fs_info->extent_root;
9231
9232         btrfs_set_log_full_commit(root->fs_info, trans);
9233
9234         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9235         if (!cache)
9236                 return -ENOMEM;
9237
9238         btrfs_set_block_group_used(&cache->item, bytes_used);
9239         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9240         btrfs_set_block_group_flags(&cache->item, type);
9241
9242         cache->flags = type;
9243         cache->last_byte_to_unpin = (u64)-1;
9244         cache->cached = BTRFS_CACHE_FINISHED;
9245         ret = exclude_super_stripes(root, cache);
9246         if (ret) {
9247                 /*
9248                  * We may have excluded something, so call this just in
9249                  * case.
9250                  */
9251                 free_excluded_extents(root, cache);
9252                 btrfs_put_block_group(cache);
9253                 return ret;
9254         }
9255
9256         add_new_free_space(cache, root->fs_info, chunk_offset,
9257                            chunk_offset + size);
9258
9259         free_excluded_extents(root, cache);
9260
9261         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9262         if (ret) {
9263                 btrfs_remove_free_space_cache(cache);
9264                 btrfs_put_block_group(cache);
9265                 return ret;
9266         }
9267
9268         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9269                                 &cache->space_info);
9270         if (ret) {
9271                 btrfs_remove_free_space_cache(cache);
9272                 spin_lock(&root->fs_info->block_group_cache_lock);
9273                 rb_erase(&cache->cache_node,
9274                          &root->fs_info->block_group_cache_tree);
9275                 spin_unlock(&root->fs_info->block_group_cache_lock);
9276                 btrfs_put_block_group(cache);
9277                 return ret;
9278         }
9279         update_global_block_rsv(root->fs_info);
9280
9281         spin_lock(&cache->space_info->lock);
9282         cache->space_info->bytes_readonly += cache->bytes_super;
9283         spin_unlock(&cache->space_info->lock);
9284
9285         __link_block_group(cache->space_info, cache);
9286
9287         list_add_tail(&cache->bg_list, &trans->new_bgs);
9288
9289         set_avail_alloc_bits(extent_root->fs_info, type);
9290
9291         return 0;
9292 }
9293
9294 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9295 {
9296         u64 extra_flags = chunk_to_extended(flags) &
9297                                 BTRFS_EXTENDED_PROFILE_MASK;
9298
9299         write_seqlock(&fs_info->profiles_lock);
9300         if (flags & BTRFS_BLOCK_GROUP_DATA)
9301                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9302         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9303                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9304         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9305                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9306         write_sequnlock(&fs_info->profiles_lock);
9307 }
9308
9309 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9310                              struct btrfs_root *root, u64 group_start,
9311                              struct extent_map *em)
9312 {
9313         struct btrfs_path *path;
9314         struct btrfs_block_group_cache *block_group;
9315         struct btrfs_free_cluster *cluster;
9316         struct btrfs_root *tree_root = root->fs_info->tree_root;
9317         struct btrfs_key key;
9318         struct inode *inode;
9319         struct kobject *kobj = NULL;
9320         int ret;
9321         int index;
9322         int factor;
9323         struct btrfs_caching_control *caching_ctl = NULL;
9324         bool remove_em;
9325
9326         root = root->fs_info->extent_root;
9327
9328         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9329         BUG_ON(!block_group);
9330         BUG_ON(!block_group->ro);
9331
9332         /*
9333          * Free the reserved super bytes from this block group before
9334          * removing it.
9335          */
9336         free_excluded_extents(root, block_group);
9337
9338         memcpy(&key, &block_group->key, sizeof(key));
9339         index = get_block_group_index(block_group);
9340         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9341                                   BTRFS_BLOCK_GROUP_RAID1 |
9342                                   BTRFS_BLOCK_GROUP_RAID10))
9343                 factor = 2;
9344         else
9345                 factor = 1;
9346
9347         /* make sure this block group isn't part of an allocation cluster */
9348         cluster = &root->fs_info->data_alloc_cluster;
9349         spin_lock(&cluster->refill_lock);
9350         btrfs_return_cluster_to_free_space(block_group, cluster);
9351         spin_unlock(&cluster->refill_lock);
9352
9353         /*
9354          * make sure this block group isn't part of a metadata
9355          * allocation cluster
9356          */
9357         cluster = &root->fs_info->meta_alloc_cluster;
9358         spin_lock(&cluster->refill_lock);
9359         btrfs_return_cluster_to_free_space(block_group, cluster);
9360         spin_unlock(&cluster->refill_lock);
9361
9362         path = btrfs_alloc_path();
9363         if (!path) {
9364                 ret = -ENOMEM;
9365                 goto out;
9366         }
9367
9368         inode = lookup_free_space_inode(tree_root, block_group, path);
9369         if (!IS_ERR(inode)) {
9370                 ret = btrfs_orphan_add(trans, inode);
9371                 if (ret) {
9372                         btrfs_add_delayed_iput(inode);
9373                         goto out;
9374                 }
9375                 clear_nlink(inode);
9376                 /* One for the block group's ref */
9377                 spin_lock(&block_group->lock);
9378                 if (block_group->iref) {
9379                         block_group->iref = 0;
9380                         block_group->inode = NULL;
9381                         spin_unlock(&block_group->lock);
9382                         iput(inode);
9383                 } else {
9384                         spin_unlock(&block_group->lock);
9385                 }
9386                 /* One for our lookup ref */
9387                 btrfs_add_delayed_iput(inode);
9388         }
9389
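        /*
         * Delete the free space cache's on-disk header item, keyed as
         * (BTRFS_FREE_SPACE_OBJECTID, 0, block group start), if it exists.
         */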
9390         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9391         key.offset = block_group->key.objectid;
9392         key.type = 0;
9393
9394         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9395         if (ret < 0)
9396                 goto out;
9397         if (ret > 0)
9398                 btrfs_release_path(path);
9399         if (ret == 0) {
9400                 ret = btrfs_del_item(trans, tree_root, path);
9401                 if (ret)
9402                         goto out;
9403                 btrfs_release_path(path);
9404         }
9405
9406         spin_lock(&root->fs_info->block_group_cache_lock);
9407         rb_erase(&block_group->cache_node,
9408                  &root->fs_info->block_group_cache_tree);
9409         RB_CLEAR_NODE(&block_group->cache_node);
9410
9411         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9412                 root->fs_info->first_logical_byte = (u64)-1;
9413         spin_unlock(&root->fs_info->block_group_cache_lock);
9414
9415         down_write(&block_group->space_info->groups_sem);
9416         /*
9417          * We must use list_del_init so callers can check whether they
9418          * are still on the list after taking the semaphore.
9419          */
9420         list_del_init(&block_group->list);
9421         list_del_init(&block_group->ro_list);
9422         if (list_empty(&block_group->space_info->block_groups[index])) {
9423                 kobj = block_group->space_info->block_group_kobjs[index];
9424                 block_group->space_info->block_group_kobjs[index] = NULL;
9425                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9426         }
9427         up_write(&block_group->space_info->groups_sem);
9428         if (kobj) {
9429                 kobject_del(kobj);
9430                 kobject_put(kobj);
9431         }
9432
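        /*
         * If caching is still in progress, wait for it to finish and then
         * release the caching control: once for the caching_block_groups
         * list and once for the reference taken here.
         */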
9433         if (block_group->has_caching_ctl)
9434                 caching_ctl = get_caching_control(block_group);
9435         if (block_group->cached == BTRFS_CACHE_STARTED)
9436                 wait_block_group_cache_done(block_group);
9437         if (block_group->has_caching_ctl) {
9438                 down_write(&root->fs_info->commit_root_sem);
9439                 if (!caching_ctl) {
9440                         struct btrfs_caching_control *ctl;
9441
9442                         list_for_each_entry(ctl,
9443                                     &root->fs_info->caching_block_groups, list)
9444                                 if (ctl->block_group == block_group) {
9445                                         caching_ctl = ctl;
9446                                         atomic_inc(&caching_ctl->count);
9447                                         break;
9448                                 }
9449                 }
9450                 if (caching_ctl)
9451                         list_del_init(&caching_ctl->list);
9452                 up_write(&root->fs_info->commit_root_sem);
9453                 if (caching_ctl) {
9454                         /* Once for the caching bgs list and once for us. */
9455                         put_caching_control(caching_ctl);
9456                         put_caching_control(caching_ctl);
9457                 }
9458         }
9459
9460         btrfs_remove_free_space_cache(block_group);
9461
9462         spin_lock(&block_group->space_info->lock);
9463         block_group->space_info->total_bytes -= block_group->key.offset;
9464         block_group->space_info->bytes_readonly -= block_group->key.offset;
9465         block_group->space_info->disk_total -= block_group->key.offset * factor;
9466         spin_unlock(&block_group->space_info->lock);
9467
9468         memcpy(&key, &block_group->key, sizeof(key));
9469
9470         lock_chunks(root);
9471         spin_lock(&block_group->lock);
9472         block_group->removed = 1;
9473         /*
9474          * At this point trimming can't start on this block group, because we
9475          * removed the block group from the tree fs_info->block_group_cache_tree
9476          * so no one can find it anymore, and even if someone already got this
9477          * block group before we removed it from the rbtree, they have already
9478          * incremented block_group->trimming - if they didn't, they won't find
9479          * any free space entries because we already removed them all when we
9480          * called btrfs_remove_free_space_cache().
9481          *
9482          * Also, we must not remove the extent map from the fs_info->mapping_tree,
9483          * so that the same logical address range and physical device space
9484          * ranges cannot be reused for a new block group. This is because our
9485          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9486          * completely transactionless, so while it is trimming a range the
9487          * currently running transaction might finish and a new one start,
9488          * allowing for new block groups to be created that can reuse the same
9489          * physical device locations unless we take this special care.
9490          */
9491         remove_em = (atomic_read(&block_group->trimming) == 0);
9492         /*
9493          * Make sure a trimmer task always sees the em in the pinned_chunks list
9494          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9495          * before checking block_group->removed).
9496          */
9497         if (!remove_em) {
9498                 /*
9499                  * Our em might be in trans->transaction->pending_chunks which
9500                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9501                  * and so is the fs_info->pinned_chunks list.
9502                  *
9503                  * So at this point we must be holding the chunk_mutex to avoid
9504                  * any races with chunk allocation (more specifically at
9505                  * volumes.c:contains_pending_extent()), to ensure it always
9506                  * sees the em, either in the pending_chunks list or in the
9507                  * pinned_chunks list.
9508                  */
9509                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9510         }
9511         spin_unlock(&block_group->lock);
9512         unlock_chunks(root);
9513
9514         if (remove_em) {
9515                 struct extent_map_tree *em_tree;
9516
9517                 em_tree = &root->fs_info->mapping_tree.map_tree;
9518                 write_lock(&em_tree->lock);
9519                 remove_extent_mapping(em_tree, em);
9520                 write_unlock(&em_tree->lock);
9521                 /* once for the tree */
9522                 free_extent_map(em);
9523         }
9524
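        /* Once for our lookup reference and once for the rbtree's reference. */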
9525         btrfs_put_block_group(block_group);
9526         btrfs_put_block_group(block_group);
9527
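        /*
         * Finally delete the block group item itself; not finding it
         * (ret > 0) means the extent tree is corrupted.
         */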
9528         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9529         if (ret > 0)
9530                 ret = -EIO;
9531         if (ret < 0)
9532                 goto out;
9533
9534         ret = btrfs_del_item(trans, root, path);
9535 out:
9536         btrfs_free_path(path);
9537         return ret;
9538 }
9539
9540 /*
9541  * Process the unused_bgs list and remove any block groups that no longer
9542  * have any allocated space inside them.
9543  */
9544 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9545 {
9546         struct btrfs_block_group_cache *block_group;
9547         struct btrfs_space_info *space_info;
9548         struct btrfs_root *root = fs_info->extent_root;
9549         struct btrfs_trans_handle *trans;
9550         int ret = 0;
9551
9552         if (!fs_info->open)
9553                 return;
9554
9555         spin_lock(&fs_info->unused_bgs_lock);
9556         while (!list_empty(&fs_info->unused_bgs)) {
9557                 u64 start, end;
9558
9559                 block_group = list_first_entry(&fs_info->unused_bgs,
9560                                                struct btrfs_block_group_cache,
9561                                                bg_list);
9562                 space_info = block_group->space_info;
9563                 list_del_init(&block_group->bg_list);
9564                 if (ret || btrfs_mixed_space_info(space_info)) {
9565                         btrfs_put_block_group(block_group);
9566                         continue;
9567                 }
9568                 spin_unlock(&fs_info->unused_bgs_lock);
9569
9570                 /* Don't want to race with allocators so take the groups_sem */
9571                 down_write(&space_info->groups_sem);
9572                 spin_lock(&block_group->lock);
9573                 if (block_group->reserved ||
9574                     btrfs_block_group_used(&block_group->item) ||
9575                     block_group->ro) {
9576                         /*
9577                          * We want to bail if we made new allocations or have
9578                          * outstanding allocations in this block group.  We do
9579                          * the ro check in case balance is currently acting on
9580                          * this block group.
9581                          */
9582                         spin_unlock(&block_group->lock);
9583                         up_write(&space_info->groups_sem);
9584                         goto next;
9585                 }
9586                 spin_unlock(&block_group->lock);
9587
9588                 /* We don't want to force the issue, only flip to RO if it's ok. */
9589                 ret = set_block_group_ro(block_group, 0);
9590                 up_write(&space_info->groups_sem);
9591                 if (ret < 0) {
9592                         ret = 0;
9593                         goto next;
9594                 }
9595
9596                 /*
9597                  * Want to do this before we do anything else so we can recover
9598                  * properly if we fail to join the transaction.
9599                  */
9600                 trans = btrfs_join_transaction(root);
9601                 if (IS_ERR(trans)) {
9602                         btrfs_set_block_group_rw(root, block_group);
9603                         ret = PTR_ERR(trans);
9604                         goto next;
9605                 }
9606
9607                 /*
9608                  * We could have pending pinned extents for this block group,
9609                  * just delete them, we don't care about them anymore.
9610                  */
9611                 start = block_group->key.objectid;
9612                 end = start + block_group->key.offset - 1;
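                /*
                 * freed_extents[0] and [1] are the two pinned-extent sets
                 * that get swapped at transaction commit, so clear both.
                 */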
9613                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9614                                   EXTENT_DIRTY, GFP_NOFS);
9615                 if (ret) {
9616                         btrfs_set_block_group_rw(root, block_group);
9617                         goto end_trans;
9618                 }
9619                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9620                                   EXTENT_DIRTY, GFP_NOFS);
9621                 if (ret) {
9622                         btrfs_set_block_group_rw(root, block_group);
9623                         goto end_trans;
9624                 }
9625
9626                 /* Reset pinned so btrfs_put_block_group doesn't complain */
9627                 block_group->pinned = 0;
9628
9629                 /*
9630                  * btrfs_remove_chunk() will abort the transaction if things go
9631                  * horribly wrong.
9632                  */
9633                 ret = btrfs_remove_chunk(trans, root,
9634                                          block_group->key.objectid);
9635 end_trans:
9636                 btrfs_end_transaction(trans, root);
9637 next:
9638                 btrfs_put_block_group(block_group);
9639                 spin_lock(&fs_info->unused_bgs_lock);
9640         }
9641         spin_unlock(&fs_info->unused_bgs_lock);
9642 }
9643
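/*
 * Pre-create the basic space_info entries: SYSTEM, plus either a single
 * mixed DATA|METADATA entry or separate METADATA and DATA entries,
 * depending on the MIXED_GROUPS incompat feature.
 */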
9644 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9645 {
9646         struct btrfs_space_info *space_info;
9647         struct btrfs_super_block *disk_super;
9648         u64 features;
9649         u64 flags;
9650         int mixed = 0;
9651         int ret;
9652
9653         disk_super = fs_info->super_copy;
9654         if (!btrfs_super_root(disk_super))
9655                 return 1;
9656
9657         features = btrfs_super_incompat_flags(disk_super);
9658         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9659                 mixed = 1;
9660
9661         flags = BTRFS_BLOCK_GROUP_SYSTEM;
9662         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9663         if (ret)
9664                 goto out;
9665
9666         if (mixed) {
9667                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9668                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9669         } else {
9670                 flags = BTRFS_BLOCK_GROUP_METADATA;
9671                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9672                 if (ret)
9673                         goto out;
9674
9675                 flags = BTRFS_BLOCK_GROUP_DATA;
9676                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9677         }
9678 out:
9679         return ret;
9680 }
9681
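/*
 * Thin wrappers around unpin_extent_range() and btrfs_discard_extent() for
 * the error/cleanup paths.
 */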
9682 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9683 {
9684         return unpin_extent_range(root, start, end);
9685 }
9686
9687 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
9688                                u64 num_bytes, u64 *actual_bytes)
9689 {
9690         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
9691 }
9692
9693 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9694 {
9695         struct btrfs_fs_info *fs_info = root->fs_info;
9696         struct btrfs_block_group_cache *cache = NULL;
9697         u64 group_trimmed;
9698         u64 start;
9699         u64 end;
9700         u64 trimmed = 0;
9701         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9702         int ret = 0;
9703
9704         /*
9705          * Try to trim all FS space; note that our first block group may start at a non-zero offset.
9706          */
9707         if (range->len == total_bytes)
9708                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9709         else
9710                 cache = btrfs_lookup_block_group(fs_info, range->start);
9711
9712         while (cache) {
9713                 if (cache->key.objectid >= (range->start + range->len)) {
9714                         btrfs_put_block_group(cache);
9715                         break;
9716                 }
9717
9718                 start = max(range->start, cache->key.objectid);
9719                 end = min(range->start + range->len,
9720                                 cache->key.objectid + cache->key.offset);
9721
9722                 if (end - start >= range->minlen) {
9723                         if (!block_group_cache_done(cache)) {
9724                                 ret = cache_block_group(cache, 0);
9725                                 if (ret) {
9726                                         btrfs_put_block_group(cache);
9727                                         break;
9728                                 }
9729                                 ret = wait_block_group_cache_done(cache);
9730                                 if (ret) {
9731                                         btrfs_put_block_group(cache);
9732                                         break;
9733                                 }
9734                         }
9735                         ret = btrfs_trim_block_group(cache,
9736                                                      &group_trimmed,
9737                                                      start,
9738                                                      end,
9739                                                      range->minlen);
9740
9741                         trimmed += group_trimmed;
9742                         if (ret) {
9743                                 btrfs_put_block_group(cache);
9744                                 break;
9745                         }
9746                 }
9747
9748                 cache = next_block_group(fs_info->tree_root, cache);
9749         }
9750
9751         range->len = trimmed;
9752         return ret;
9753 }
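
/*
 * Illustrative sketch (not part of this file): a caller such as the FITRIM
 * ioctl handler is expected to fill in a struct fstrim_range and hand it to
 * btrfs_trim_fs(), roughly like:
 *
 *	struct fstrim_range range = {
 *		.start  = 0,
 *		.len    = (u64)-1,	(clamped against total_bytes by the caller)
 *		.minlen = 0,
 *	};
 *	int ret = btrfs_trim_fs(fs_info->tree_root, &range);
 *	(on success, range.len is updated to the number of bytes trimmed)
 */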
9754
9755 /*
9756  * btrfs_{start,end}_write_no_snapshoting() are similar to
9757  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
9758  * data into the page cache through nocow before the subvolume is snapshotted
9759  * (such data is only flushed to disk after the snapshot is created), and to
9760  * prevent operations that would make an in-progress snapshot inconsistent
9761  * (for example, writes followed by expanding truncates).
9762  */
9763 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
9764 {
9765         percpu_counter_dec(&root->subv_writers->counter);
9766         /*
9767          * Make sure counter is updated before we wake up
9768          * waiters.
9769          */
9770         smp_mb();
9771         if (waitqueue_active(&root->subv_writers->wait))
9772                 wake_up(&root->subv_writers->wait);
9773 }
9774
9775 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
9776 {
9777         if (atomic_read(&root->will_be_snapshoted))
9778                 return 0;
9779
9780         percpu_counter_inc(&root->subv_writers->counter);
9781         /*
9782          * Make sure counter is updated before we check for snapshot creation.
9783          */
9784         smp_mb();
9785         if (atomic_read(&root->will_be_snapshoted)) {
9786                 btrfs_end_write_no_snapshoting(root);
9787                 return 0;
9788         }
9789         return 1;
9790 }
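
/*
 * Typical usage (illustrative only): a nocow writer brackets its work so
 * that snapshot creation can fence off in-flight writes:
 *
 *	if (btrfs_start_write_no_snapshoting(root)) {
 *		(... do the nocow write ...)
 *		btrfs_end_write_no_snapshoting(root);
 *	} else {
 *		(a snapshot is pending or ongoing, fall back to the cow path)
 *	}
 */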