fs/btrfs/extent_io.c (android-x86/kernel.git, commit 105b3d2e86f22f3bd4474c796bfc07d9459ca473)

1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include <linux/prefetch.h>
15 #include <linux/cleancache.h>
16 #include "extent_io.h"
17 #include "extent_map.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22 #include "locking.h"
23 #include "rcu-string.h"
24 #include "backref.h"
25 #include "disk-io.h"
26
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
29 static struct bio_set btrfs_bioset;
30
31 static inline bool extent_state_in_tree(const struct extent_state *state)
32 {
33         return !RB_EMPTY_NODE(&state->rb_node);
34 }
35
36 #ifdef CONFIG_BTRFS_DEBUG
37 static LIST_HEAD(buffers);
38 static LIST_HEAD(states);
39
40 static DEFINE_SPINLOCK(leak_lock);
41
42 static inline
43 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
44 {
45         unsigned long flags;
46
47         spin_lock_irqsave(&leak_lock, flags);
48         list_add(new, head);
49         spin_unlock_irqrestore(&leak_lock, flags);
50 }
51
52 static inline
53 void btrfs_leak_debug_del(struct list_head *entry)
54 {
55         unsigned long flags;
56
57         spin_lock_irqsave(&leak_lock, flags);
58         list_del(entry);
59         spin_unlock_irqrestore(&leak_lock, flags);
60 }
61
62 static inline
63 void btrfs_leak_debug_check(void)
64 {
65         struct extent_state *state;
66         struct extent_buffer *eb;
67
68         while (!list_empty(&states)) {
69                 state = list_entry(states.next, struct extent_state, leak_list);
70                 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
71                        state->start, state->end, state->state,
72                        extent_state_in_tree(state),
73                        refcount_read(&state->refs));
74                 list_del(&state->leak_list);
75                 kmem_cache_free(extent_state_cache, state);
76         }
77
78         while (!list_empty(&buffers)) {
79                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
80                 pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
81                        eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
82                 list_del(&eb->leak_list);
83                 kmem_cache_free(extent_buffer_cache, eb);
84         }
85 }
86
87 #define btrfs_debug_check_extent_io_range(tree, start, end)             \
88         __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
89 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
90                 struct extent_io_tree *tree, u64 start, u64 end)
91 {
92         struct inode *inode = tree->private_data;
93         u64 isize;
94
95         if (!inode || !is_data_inode(inode))
96                 return;
97
98         isize = i_size_read(inode);
99         if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
100                 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
101                     "%s: ino %llu isize %llu odd range [%llu,%llu]",
102                         caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
103         }
104 }
105 #else
106 #define btrfs_leak_debug_add(new, head) do {} while (0)
107 #define btrfs_leak_debug_del(entry)     do {} while (0)
108 #define btrfs_leak_debug_check()        do {} while (0)
109 #define btrfs_debug_check_extent_io_range(c, s, e)      do {} while (0)
110 #endif
111
112 struct tree_entry {
113         u64 start;
114         u64 end;
115         struct rb_node rb_node;
116 };
117
118 struct extent_page_data {
119         struct bio *bio;
120         struct extent_io_tree *tree;
121         /* tells writepage not to lock the state bits for this range;
122          * it still does the unlocking
123          */
124         unsigned int extent_locked:1;
125
126         /* tells the submit_bio code to use REQ_SYNC */
127         unsigned int sync_io:1;
128 };
129
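/*
 * Record the byte range of 'state' in 'changeset', but only when the
 * operation actually changes something: for a set, at least one of 'bits'
 * must not already be set; for a clear, at least one of 'bits' must
 * currently be set.
 */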
130 static int add_extent_changeset(struct extent_state *state, unsigned bits,
131                                  struct extent_changeset *changeset,
132                                  int set)
133 {
134         int ret;
135
136         if (!changeset)
137                 return 0;
138         if (set && (state->state & bits) == bits)
139                 return 0;
140         if (!set && (state->state & bits) == 0)
141                 return 0;
142         changeset->bytes_changed += state->end - state->start + 1;
143         ret = ulist_add(&changeset->range_changed, state->start, state->end,
144                         GFP_ATOMIC);
145         return ret;
146 }
147
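/*
 * Hand a fully built bio over to the filesystem's submit_bio_hook if the
 * tree has one registered, otherwise submit it directly.  The block layer
 * status is translated to a negative errno.
 */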
148 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
149                                        unsigned long bio_flags)
150 {
151         blk_status_t ret = 0;
152         struct extent_io_tree *tree = bio->bi_private;
153
154         bio->bi_private = NULL;
155
156         if (tree->ops)
157                 ret = tree->ops->submit_bio_hook(tree->private_data, bio,
158                                                  mirror_num, bio_flags);
159         else
160                 btrfsic_submit_bio(bio);
161
162         return blk_status_to_errno(ret);
163 }
164
165 /* Cleanup unsubmitted bios */
166 static void end_write_bio(struct extent_page_data *epd, int ret)
167 {
168         if (epd->bio) {
169                 epd->bio->bi_status = errno_to_blk_status(ret);
170                 bio_endio(epd->bio);
171                 epd->bio = NULL;
172         }
173 }
174
175 /*
176  * Submit bio from extent page data via submit_one_bio
177  *
178  * Return 0 if everything is OK.
179  * Return <0 for error.
180  */
181 static int __must_check flush_write_bio(struct extent_page_data *epd)
182 {
183         int ret = 0;
184
185         if (epd->bio) {
186                 ret = submit_one_bio(epd->bio, 0, 0);
187                 /*
188                  * Clean up of epd->bio is handled by its endio function.
189                  * And endio is either triggered by successful bio execution
190                  * or the error handler of the submit bio hook.
191                  * So at this point, no matter what happened, we don't need
192                  * to clean up epd->bio.
193                  */
194                 epd->bio = NULL;
195         }
196         return ret;
197 }
198
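/*
 * Create the slab caches for extent states and extent buffers plus the
 * bioset used for btrfs_io_bio allocations.  Called once at module load.
 */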
199 int __init extent_io_init(void)
200 {
201         extent_state_cache = kmem_cache_create("btrfs_extent_state",
202                         sizeof(struct extent_state), 0,
203                         SLAB_MEM_SPREAD, NULL);
204         if (!extent_state_cache)
205                 return -ENOMEM;
206
207         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
208                         sizeof(struct extent_buffer), 0,
209                         SLAB_MEM_SPREAD, NULL);
210         if (!extent_buffer_cache)
211                 goto free_state_cache;
212
213         if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
214                         offsetof(struct btrfs_io_bio, bio),
215                         BIOSET_NEED_BVECS))
216                 goto free_buffer_cache;
217
218         if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
219                 goto free_bioset;
220
221         return 0;
222
223 free_bioset:
224         bioset_exit(&btrfs_bioset);
225
226 free_buffer_cache:
227         kmem_cache_destroy(extent_buffer_cache);
228         extent_buffer_cache = NULL;
229
230 free_state_cache:
231         kmem_cache_destroy(extent_state_cache);
232         extent_state_cache = NULL;
233         return -ENOMEM;
234 }
235
236 void __cold extent_io_exit(void)
237 {
238         btrfs_leak_debug_check();
239
240         /*
241          * Make sure all delayed rcu free are flushed before we
242          * destroy caches.
243          */
244         rcu_barrier();
245         kmem_cache_destroy(extent_state_cache);
246         kmem_cache_destroy(extent_buffer_cache);
247         bioset_exit(&btrfs_bioset);
248 }
249
250 void extent_io_tree_init(struct btrfs_fs_info *fs_info,
251                          struct extent_io_tree *tree, unsigned int owner,
252                          void *private_data)
253 {
254         tree->fs_info = fs_info;
255         tree->state = RB_ROOT;
256         tree->ops = NULL;
257         tree->dirty_bytes = 0;
258         spin_lock_init(&tree->lock);
259         tree->private_data = private_data;
260         tree->owner = owner;
261 }
262
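/*
 * Free every extent state still left in the tree.  This is called when the
 * tree is being torn down, so no task may be waiting on any of the states.
 */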
263 void extent_io_tree_release(struct extent_io_tree *tree)
264 {
265         spin_lock(&tree->lock);
266         /*
267          * Do a single barrier for the waitqueue_active check here, the state
268          * of the waitqueue should not change once extent_io_tree_release is
269          * called.
270          */
271         smp_mb();
272         while (!RB_EMPTY_ROOT(&tree->state)) {
273                 struct rb_node *node;
274                 struct extent_state *state;
275
276                 node = rb_first(&tree->state);
277                 state = rb_entry(node, struct extent_state, rb_node);
278                 rb_erase(&state->rb_node, &tree->state);
279                 RB_CLEAR_NODE(&state->rb_node);
280                 /*
281                  * btree io trees aren't supposed to have tasks waiting for
282                  * changes in the flags of extent states ever.
283                  */
284                 ASSERT(!waitqueue_active(&state->wq));
285                 free_extent_state(state);
286
287                 cond_resched_lock(&tree->lock);
288         }
289         spin_unlock(&tree->lock);
290 }
291
292 static struct extent_state *alloc_extent_state(gfp_t mask)
293 {
294         struct extent_state *state;
295
296         /*
297          * The given mask might not be appropriate for the slab allocator;
298          * drop the unsupported bits.
299          */
300         mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
301         state = kmem_cache_alloc(extent_state_cache, mask);
302         if (!state)
303                 return state;
304         state->state = 0;
305         state->failrec = NULL;
306         RB_CLEAR_NODE(&state->rb_node);
307         btrfs_leak_debug_add(&state->leak_list, &states);
308         refcount_set(&state->refs, 1);
309         init_waitqueue_head(&state->wq);
310         trace_alloc_extent_state(state, mask, _RET_IP_);
311         return state;
312 }
313
314 void free_extent_state(struct extent_state *state)
315 {
316         if (!state)
317                 return;
318         if (refcount_dec_and_test(&state->refs)) {
319                 WARN_ON(extent_state_in_tree(state));
320                 btrfs_leak_debug_del(&state->leak_list);
321                 trace_free_extent_state(state, _RET_IP_);
322                 kmem_cache_free(extent_state_cache, state);
323         }
324 }
325
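/*
 * Insert 'node' into the rb-tree.  If 'p_in' and 'parent_in' are given they
 * describe a previously located insertion point, otherwise the tree is
 * searched starting at 'search_start' (or the root).  Returns the existing
 * node whose range already covers 'offset', or NULL after a successful
 * insert.
 */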
326 static struct rb_node *tree_insert(struct rb_root *root,
327                                    struct rb_node *search_start,
328                                    u64 offset,
329                                    struct rb_node *node,
330                                    struct rb_node ***p_in,
331                                    struct rb_node **parent_in)
332 {
333         struct rb_node **p;
334         struct rb_node *parent = NULL;
335         struct tree_entry *entry;
336
337         if (p_in && parent_in) {
338                 p = *p_in;
339                 parent = *parent_in;
340                 goto do_insert;
341         }
342
343         p = search_start ? &search_start : &root->rb_node;
344         while (*p) {
345                 parent = *p;
346                 entry = rb_entry(parent, struct tree_entry, rb_node);
347
348                 if (offset < entry->start)
349                         p = &(*p)->rb_left;
350                 else if (offset > entry->end)
351                         p = &(*p)->rb_right;
352                 else
353                         return parent;
354         }
355
356 do_insert:
357         rb_link_node(node, parent, p);
358         rb_insert_color(node, root);
359         return NULL;
360 }
361
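/*
 * Search the tree for the entry containing 'offset'.  If no such entry
 * exists, NULL is returned and the neighbouring entries and the insertion
 * point are optionally reported through the *_ret parameters.
 */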
362 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
363                                       struct rb_node **next_ret,
364                                       struct rb_node **prev_ret,
365                                       struct rb_node ***p_ret,
366                                       struct rb_node **parent_ret)
367 {
368         struct rb_root *root = &tree->state;
369         struct rb_node **n = &root->rb_node;
370         struct rb_node *prev = NULL;
371         struct rb_node *orig_prev = NULL;
372         struct tree_entry *entry;
373         struct tree_entry *prev_entry = NULL;
374
375         while (*n) {
376                 prev = *n;
377                 entry = rb_entry(prev, struct tree_entry, rb_node);
378                 prev_entry = entry;
379
380                 if (offset < entry->start)
381                         n = &(*n)->rb_left;
382                 else if (offset > entry->end)
383                         n = &(*n)->rb_right;
384                 else
385                         return *n;
386         }
387
388         if (p_ret)
389                 *p_ret = n;
390         if (parent_ret)
391                 *parent_ret = prev;
392
393         if (next_ret) {
394                 orig_prev = prev;
395                 while (prev && offset > prev_entry->end) {
396                         prev = rb_next(prev);
397                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
398                 }
399                 *next_ret = prev;
400                 prev = orig_prev;
401         }
402
403         if (prev_ret) {
404                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
405                 while (prev && offset < prev_entry->start) {
406                         prev = rb_prev(prev);
407                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
408                 }
409                 *prev_ret = prev;
410         }
411         return NULL;
412 }
413
414 static inline struct rb_node *
415 tree_search_for_insert(struct extent_io_tree *tree,
416                        u64 offset,
417                        struct rb_node ***p_ret,
418                        struct rb_node **parent_ret)
419 {
420         struct rb_node *next = NULL;
421         struct rb_node *ret;
422
423         ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
424         if (!ret)
425                 return next;
426         return ret;
427 }
428
429 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
430                                           u64 offset)
431 {
432         return tree_search_for_insert(tree, offset, NULL, NULL);
433 }
434
435 /*
436  * utility function to look for merge candidates inside a given range.
437  * Any extents with matching state are merged together into a single
438  * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in their
439  * state field are not merged because the end_io handlers need to be able to do
440  * operations on them without sleeping (or doing allocations/splits).
441  *
442  * This should be called with the tree lock held.
443  */
444 static void merge_state(struct extent_io_tree *tree,
445                         struct extent_state *state)
446 {
447         struct extent_state *other;
448         struct rb_node *other_node;
449
450         if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
451                 return;
452
453         other_node = rb_prev(&state->rb_node);
454         if (other_node) {
455                 other = rb_entry(other_node, struct extent_state, rb_node);
456                 if (other->end == state->start - 1 &&
457                     other->state == state->state) {
458                         if (tree->private_data &&
459                             is_data_inode(tree->private_data))
460                                 btrfs_merge_delalloc_extent(tree->private_data,
461                                                             state, other);
462                         state->start = other->start;
463                         rb_erase(&other->rb_node, &tree->state);
464                         RB_CLEAR_NODE(&other->rb_node);
465                         free_extent_state(other);
466                 }
467         }
468         other_node = rb_next(&state->rb_node);
469         if (other_node) {
470                 other = rb_entry(other_node, struct extent_state, rb_node);
471                 if (other->start == state->end + 1 &&
472                     other->state == state->state) {
473                         if (tree->private_data &&
474                             is_data_inode(tree->private_data))
475                                 btrfs_merge_delalloc_extent(tree->private_data,
476                                                             state, other);
477                         state->end = other->end;
478                         rb_erase(&other->rb_node, &tree->state);
479                         RB_CLEAR_NODE(&other->rb_node);
480                         free_extent_state(other);
481                 }
482         }
483 }
484
485 static void set_state_bits(struct extent_io_tree *tree,
486                            struct extent_state *state, unsigned *bits,
487                            struct extent_changeset *changeset);
488
489 /*
490  * insert an extent_state struct into the tree.  'bits' are set on the
491  * struct before it is inserted.
492  *
493  * This may return -EEXIST if the extent is already there, in which case the
494  * state struct is freed.
495  *
496  * The tree lock is not taken internally.  This is a utility function and
497  * probably isn't what you want to call (see set/clear_extent_bit).
498  */
499 static int insert_state(struct extent_io_tree *tree,
500                         struct extent_state *state, u64 start, u64 end,
501                         struct rb_node ***p,
502                         struct rb_node **parent,
503                         unsigned *bits, struct extent_changeset *changeset)
504 {
505         struct rb_node *node;
506
507         if (end < start)
508                 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
509                        end, start);
510         state->start = start;
511         state->end = end;
512
513         set_state_bits(tree, state, bits, changeset);
514
515         node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
516         if (node) {
517                 struct extent_state *found;
518                 found = rb_entry(node, struct extent_state, rb_node);
519                 pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
520                        found->start, found->end, start, end);
521                 return -EEXIST;
522         }
523         merge_state(tree, state);
524         return 0;
525 }
526
527 /*
528  * split a given extent state struct in two, inserting the preallocated
529  * struct 'prealloc' as the newly created second half.  'split' indicates an
530  * offset inside 'orig' where it should be split.
531  *
532  * Before calling,
533  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
534  * are two extent state structs in the tree:
535  * prealloc: [orig->start, split - 1]
536  * orig: [ split, orig->end ]
537  *
538  * The tree locks are not taken by this function. They need to be held
539  * by the caller.
540  */
541 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
542                        struct extent_state *prealloc, u64 split)
543 {
544         struct rb_node *node;
545
546         if (tree->private_data && is_data_inode(tree->private_data))
547                 btrfs_split_delalloc_extent(tree->private_data, orig, split);
548
549         prealloc->start = orig->start;
550         prealloc->end = split - 1;
551         prealloc->state = orig->state;
552         orig->start = split;
553
554         node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
555                            &prealloc->rb_node, NULL, NULL);
556         if (node) {
557                 free_extent_state(prealloc);
558                 return -EEXIST;
559         }
560         return 0;
561 }
562
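/* Return the extent state that follows 'state' in the tree, or NULL. */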
563 static struct extent_state *next_state(struct extent_state *state)
564 {
565         struct rb_node *next = rb_next(&state->rb_node);
566         if (next)
567                 return rb_entry(next, struct extent_state, rb_node);
568         else
569                 return NULL;
570 }
571
572 /*
573  * utility function to clear some bits in an extent state struct.
574  * it will optionally wake up anyone waiting on this state (wake == 1).
575  *
576  * If no bits are set on the state struct after clearing things, the
577  * struct is freed and removed from the tree
578  */
579 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
580                                             struct extent_state *state,
581                                             unsigned *bits, int wake,
582                                             struct extent_changeset *changeset)
583 {
584         struct extent_state *next;
585         unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
586         int ret;
587
588         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
589                 u64 range = state->end - state->start + 1;
590                 WARN_ON(range > tree->dirty_bytes);
591                 tree->dirty_bytes -= range;
592         }
593
594         if (tree->private_data && is_data_inode(tree->private_data))
595                 btrfs_clear_delalloc_extent(tree->private_data, state, bits);
596
597         ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
598         BUG_ON(ret < 0);
599         state->state &= ~bits_to_clear;
600         if (wake)
601                 wake_up(&state->wq);
602         if (state->state == 0) {
603                 next = next_state(state);
604                 if (extent_state_in_tree(state)) {
605                         rb_erase(&state->rb_node, &tree->state);
606                         RB_CLEAR_NODE(&state->rb_node);
607                         free_extent_state(state);
608                 } else {
609                         WARN_ON(1);
610                 }
611         } else {
612                 merge_state(tree, state);
613                 next = next_state(state);
614         }
615         return next;
616 }
617
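/*
 * Reuse an already preallocated extent state if one was passed in,
 * otherwise try a GFP_ATOMIC allocation.  May still return NULL.
 */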
618 static struct extent_state *
619 alloc_extent_state_atomic(struct extent_state *prealloc)
620 {
621         if (!prealloc)
622                 prealloc = alloc_extent_state(GFP_ATOMIC);
623
624         return prealloc;
625 }
626
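/*
 * The tree was modified by another thread while we held the lock; this is
 * an unrecoverable bug, so hand the error to btrfs_panic().
 */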
627 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
628 {
629         struct inode *inode = tree->private_data;
630
631         btrfs_panic(btrfs_sb(inode->i_sb), err,
632         "locking error: extent tree was modified by another thread while locked");
633 }
634
635 /*
636  * clear some bits on a range in the tree.  This may require splitting
637  * or inserting elements in the tree, so the gfp mask is used to
638  * indicate which allocations or sleeping are allowed.
639  *
640  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
641  * the given range from the tree regardless of state (ie for truncate).
642  *
643  * the range [start, end] is inclusive.
644  *
645  * This takes the tree lock, and returns 0 on success and < 0 on error.
646  */
647 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
648                               unsigned bits, int wake, int delete,
649                               struct extent_state **cached_state,
650                               gfp_t mask, struct extent_changeset *changeset)
651 {
652         struct extent_state *state;
653         struct extent_state *cached;
654         struct extent_state *prealloc = NULL;
655         struct rb_node *node;
656         u64 last_end;
657         int err;
658         int clear = 0;
659
660         btrfs_debug_check_extent_io_range(tree, start, end);
661         trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
662
663         if (bits & EXTENT_DELALLOC)
664                 bits |= EXTENT_NORESERVE;
665
666         if (delete)
667                 bits |= ~EXTENT_CTLBITS;
668
669         if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
670                 clear = 1;
671 again:
672         if (!prealloc && gfpflags_allow_blocking(mask)) {
673                 /*
674                  * Don't worry about allocation failure here because we might
675                  * end up not needing the pre-allocated extent state at all,
676                  * which is the case if the extent states in the tree that cover
677                  * our input range do not extend beyond it.
678                  * If we end up needing a new extent state we allocate it later.
679                  */
680                 prealloc = alloc_extent_state(mask);
681         }
682
683         spin_lock(&tree->lock);
684         if (cached_state) {
685                 cached = *cached_state;
686
687                 if (clear) {
688                         *cached_state = NULL;
689                         cached_state = NULL;
690                 }
691
692                 if (cached && extent_state_in_tree(cached) &&
693                     cached->start <= start && cached->end > start) {
694                         if (clear)
695                                 refcount_dec(&cached->refs);
696                         state = cached;
697                         goto hit_next;
698                 }
699                 if (clear)
700                         free_extent_state(cached);
701         }
702         /*
703          * this search will find the extents that end after
704          * our range starts
705          */
706         node = tree_search(tree, start);
707         if (!node)
708                 goto out;
709         state = rb_entry(node, struct extent_state, rb_node);
710 hit_next:
711         if (state->start > end)
712                 goto out;
713         WARN_ON(state->end < start);
714         last_end = state->end;
715
716         /* the state doesn't have the wanted bits, go ahead */
717         if (!(state->state & bits)) {
718                 state = next_state(state);
719                 goto next;
720         }
721
722         /*
723          *     | ---- desired range ---- |
724          *  | state | or
725          *  | ------------- state -------------- |
726          *
727          * We need to split the extent we found, and may flip
728          * bits on second half.
729          *
730          * If the extent we found extends past our range, we
731          * just split and search again.  It'll get split again
732          * the next time though.
733          *
734          * If the extent we found is inside our range, we clear
735          * the desired bit on it.
736          */
737
738         if (state->start < start) {
739                 prealloc = alloc_extent_state_atomic(prealloc);
740                 BUG_ON(!prealloc);
741                 err = split_state(tree, state, prealloc, start);
742                 if (err)
743                         extent_io_tree_panic(tree, err);
744
745                 prealloc = NULL;
746                 if (err)
747                         goto out;
748                 if (state->end <= end) {
749                         state = clear_state_bit(tree, state, &bits, wake,
750                                                 changeset);
751                         goto next;
752                 }
753                 goto search_again;
754         }
755         /*
756          * | ---- desired range ---- |
757          *                        | state |
758          * We need to split the extent, and clear the bit
759          * on the first half
760          */
761         if (state->start <= end && state->end > end) {
762                 prealloc = alloc_extent_state_atomic(prealloc);
763                 BUG_ON(!prealloc);
764                 err = split_state(tree, state, prealloc, end + 1);
765                 if (err)
766                         extent_io_tree_panic(tree, err);
767
768                 if (wake)
769                         wake_up(&state->wq);
770
771                 clear_state_bit(tree, prealloc, &bits, wake, changeset);
772
773                 prealloc = NULL;
774                 goto out;
775         }
776
777         state = clear_state_bit(tree, state, &bits, wake, changeset);
778 next:
779         if (last_end == (u64)-1)
780                 goto out;
781         start = last_end + 1;
782         if (start <= end && state && !need_resched())
783                 goto hit_next;
784
785 search_again:
786         if (start > end)
787                 goto out;
788         spin_unlock(&tree->lock);
789         if (gfpflags_allow_blocking(mask))
790                 cond_resched();
791         goto again;
792
793 out:
794         spin_unlock(&tree->lock);
795         if (prealloc)
796                 free_extent_state(prealloc);
797
798         return 0;
799
800 }
801
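/*
 * Drop the tree lock and sleep until another thread clears a bit on
 * 'state' and wakes its waitqueue, then re-take the lock.
 */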
802 static void wait_on_state(struct extent_io_tree *tree,
803                           struct extent_state *state)
804                 __releases(tree->lock)
805                 __acquires(tree->lock)
806 {
807         DEFINE_WAIT(wait);
808         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
809         spin_unlock(&tree->lock);
810         schedule();
811         spin_lock(&tree->lock);
812         finish_wait(&state->wq, &wait);
813 }
814
815 /*
816  * waits for one or more bits to clear on a range in the state tree.
817  * The range [start, end] is inclusive.
818  * The tree lock is taken by this function
819  */
820 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
821                             unsigned long bits)
822 {
823         struct extent_state *state;
824         struct rb_node *node;
825
826         btrfs_debug_check_extent_io_range(tree, start, end);
827
828         spin_lock(&tree->lock);
829 again:
830         while (1) {
831                 /*
832                  * this search will find all the extents that end after
833                  * our range starts
834                  */
835                 node = tree_search(tree, start);
836 process_node:
837                 if (!node)
838                         break;
839
840                 state = rb_entry(node, struct extent_state, rb_node);
841
842                 if (state->start > end)
843                         goto out;
844
845                 if (state->state & bits) {
846                         start = state->start;
847                         refcount_inc(&state->refs);
848                         wait_on_state(tree, state);
849                         free_extent_state(state);
850                         goto again;
851                 }
852                 start = state->end + 1;
853
854                 if (start > end)
855                         break;
856
857                 if (!cond_resched_lock(&tree->lock)) {
858                         node = rb_next(node);
859                         goto process_node;
860                 }
861         }
862 out:
863         spin_unlock(&tree->lock);
864 }
865
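/*
 * Set the given bits (minus the EXTENT_CTLBITS control bits) on 'state',
 * updating the per-tree dirty byte accounting, the delalloc inode hooks
 * and the optional changeset.
 */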
866 static void set_state_bits(struct extent_io_tree *tree,
867                            struct extent_state *state,
868                            unsigned *bits, struct extent_changeset *changeset)
869 {
870         unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
871         int ret;
872
873         if (tree->private_data && is_data_inode(tree->private_data))
874                 btrfs_set_delalloc_extent(tree->private_data, state, bits);
875
876         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
877                 u64 range = state->end - state->start + 1;
878                 tree->dirty_bytes += range;
879         }
880         ret = add_extent_changeset(state, bits_to_set, changeset, 1);
881         BUG_ON(ret < 0);
882         state->state |= bits_to_set;
883 }
884
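/*
 * Stash 'state' in '*cached_ptr' (taking an extra reference) if nothing is
 * cached yet and the state carries at least one of the requested flags, or
 * if no flags were requested at all.
 */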
885 static void cache_state_if_flags(struct extent_state *state,
886                                  struct extent_state **cached_ptr,
887                                  unsigned flags)
888 {
889         if (cached_ptr && !(*cached_ptr)) {
890                 if (!flags || (state->state & flags)) {
891                         *cached_ptr = state;
892                         refcount_inc(&state->refs);
893                 }
894         }
895 }
896
897 static void cache_state(struct extent_state *state,
898                         struct extent_state **cached_ptr)
899 {
900         return cache_state_if_flags(state, cached_ptr,
901                                     EXTENT_LOCKED | EXTENT_BOUNDARY);
902 }
903
904 /*
905  * set some bits on a range in the tree.  This may require allocations or
906  * sleeping, so the gfp mask is used to indicate what is allowed.
907  *
908  * If any of the exclusive bits are set, this will fail with -EEXIST if some
909  * part of the range already has the desired bits set.  The start of the
910  * existing range is returned in failed_start in this case.
911  *
912  * [start, end] is inclusive.  This takes the tree lock.
913  */
914
915 static int __must_check
916 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
917                  unsigned bits, unsigned exclusive_bits,
918                  u64 *failed_start, struct extent_state **cached_state,
919                  gfp_t mask, struct extent_changeset *changeset)
920 {
921         struct extent_state *state;
922         struct extent_state *prealloc = NULL;
923         struct rb_node *node;
924         struct rb_node **p;
925         struct rb_node *parent;
926         int err = 0;
927         u64 last_start;
928         u64 last_end;
929
930         btrfs_debug_check_extent_io_range(tree, start, end);
931         trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
932
933 again:
934         if (!prealloc && gfpflags_allow_blocking(mask)) {
935                 /*
936                  * Don't worry about allocation failure here because we might
937                  * end up not needing the pre-allocated extent state at all,
938                  * which is the case if the extent states in the tree that cover
939                  * our input range do not extend beyond it.
940                  * If we end up needing a new extent state we allocate it later.
941                  */
942                 prealloc = alloc_extent_state(mask);
943         }
944
945         spin_lock(&tree->lock);
946         if (cached_state && *cached_state) {
947                 state = *cached_state;
948                 if (state->start <= start && state->end > start &&
949                     extent_state_in_tree(state)) {
950                         node = &state->rb_node;
951                         goto hit_next;
952                 }
953         }
954         /*
955          * this search will find all the extents that end after
956          * our range starts.
957          */
958         node = tree_search_for_insert(tree, start, &p, &parent);
959         if (!node) {
960                 prealloc = alloc_extent_state_atomic(prealloc);
961                 BUG_ON(!prealloc);
962                 err = insert_state(tree, prealloc, start, end,
963                                    &p, &parent, &bits, changeset);
964                 if (err)
965                         extent_io_tree_panic(tree, err);
966
967                 cache_state(prealloc, cached_state);
968                 prealloc = NULL;
969                 goto out;
970         }
971         state = rb_entry(node, struct extent_state, rb_node);
972 hit_next:
973         last_start = state->start;
974         last_end = state->end;
975
976         /*
977          * | ---- desired range ---- |
978          * | state |
979          *
980          * Just lock what we found and keep going
981          */
982         if (state->start == start && state->end <= end) {
983                 if (state->state & exclusive_bits) {
984                         *failed_start = state->start;
985                         err = -EEXIST;
986                         goto out;
987                 }
988
989                 set_state_bits(tree, state, &bits, changeset);
990                 cache_state(state, cached_state);
991                 merge_state(tree, state);
992                 if (last_end == (u64)-1)
993                         goto out;
994                 start = last_end + 1;
995                 state = next_state(state);
996                 if (start < end && state && state->start == start &&
997                     !need_resched())
998                         goto hit_next;
999                 goto search_again;
1000         }
1001
1002         /*
1003          *     | ---- desired range ---- |
1004          * | state |
1005          *   or
1006          * | ------------- state -------------- |
1007          *
1008          * We need to split the extent we found, and may flip bits on
1009          * second half.
1010          *
1011          * If the extent we found extends past our
1012          * range, we just split and search again.  It'll get split
1013          * again the next time though.
1014          *
1015          * If the extent we found is inside our range, we set the
1016          * desired bit on it.
1017          */
1018         if (state->start < start) {
1019                 if (state->state & exclusive_bits) {
1020                         *failed_start = start;
1021                         err = -EEXIST;
1022                         goto out;
1023                 }
1024
1025                 prealloc = alloc_extent_state_atomic(prealloc);
1026                 BUG_ON(!prealloc);
1027                 err = split_state(tree, state, prealloc, start);
1028                 if (err)
1029                         extent_io_tree_panic(tree, err);
1030
1031                 prealloc = NULL;
1032                 if (err)
1033                         goto out;
1034                 if (state->end <= end) {
1035                         set_state_bits(tree, state, &bits, changeset);
1036                         cache_state(state, cached_state);
1037                         merge_state(tree, state);
1038                         if (last_end == (u64)-1)
1039                                 goto out;
1040                         start = last_end + 1;
1041                         state = next_state(state);
1042                         if (start < end && state && state->start == start &&
1043                             !need_resched())
1044                                 goto hit_next;
1045                 }
1046                 goto search_again;
1047         }
1048         /*
1049          * | ---- desired range ---- |
1050          *     | state | or               | state |
1051          *
1052          * There's a hole, we need to insert something in it and
1053          * ignore the extent we found.
1054          */
1055         if (state->start > start) {
1056                 u64 this_end;
1057                 if (end < last_start)
1058                         this_end = end;
1059                 else
1060                         this_end = last_start - 1;
1061
1062                 prealloc = alloc_extent_state_atomic(prealloc);
1063                 BUG_ON(!prealloc);
1064
1065                 /*
1066                  * Avoid freeing 'prealloc' if it can be merged with
1067                  * the later extent.
1068                  */
1069                 err = insert_state(tree, prealloc, start, this_end,
1070                                    NULL, NULL, &bits, changeset);
1071                 if (err)
1072                         extent_io_tree_panic(tree, err);
1073
1074                 cache_state(prealloc, cached_state);
1075                 prealloc = NULL;
1076                 start = this_end + 1;
1077                 goto search_again;
1078         }
1079         /*
1080          * | ---- desired range ---- |
1081          *                        | state |
1082          * We need to split the extent, and set the bit
1083          * on the first half
1084          */
1085         if (state->start <= end && state->end > end) {
1086                 if (state->state & exclusive_bits) {
1087                         *failed_start = start;
1088                         err = -EEXIST;
1089                         goto out;
1090                 }
1091
1092                 prealloc = alloc_extent_state_atomic(prealloc);
1093                 BUG_ON(!prealloc);
1094                 err = split_state(tree, state, prealloc, end + 1);
1095                 if (err)
1096                         extent_io_tree_panic(tree, err);
1097
1098                 set_state_bits(tree, prealloc, &bits, changeset);
1099                 cache_state(prealloc, cached_state);
1100                 merge_state(tree, prealloc);
1101                 prealloc = NULL;
1102                 goto out;
1103         }
1104
1105 search_again:
1106         if (start > end)
1107                 goto out;
1108         spin_unlock(&tree->lock);
1109         if (gfpflags_allow_blocking(mask))
1110                 cond_resched();
1111         goto again;
1112
1113 out:
1114         spin_unlock(&tree->lock);
1115         if (prealloc)
1116                 free_extent_state(prealloc);
1117
1118         return err;
1119
1120 }
1121
1122 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1123                    unsigned bits, u64 *failed_start,
1124                    struct extent_state **cached_state, gfp_t mask)
1125 {
1126         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1127                                 cached_state, mask, NULL);
1128 }
1129
1130
1131 /**
1132  * convert_extent_bit - convert all bits in a given range from one bit to
1133  *                      another
1134  * @tree:       the io tree to search
1135  * @start:      the start offset in bytes
1136  * @end:        the end offset in bytes (inclusive)
1137  * @bits:       the bits to set in this range
1138  * @clear_bits: the bits to clear in this range
1139  * @cached_state:       state that we're going to cache
1140  *
1141  * This will go through and set bits for the given range.  If any states exist
1142  * already in this range they are set with the given bit and cleared of the
1143  * clear_bits.  This is only meant to be used by things that are mergeable, ie
1144  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1145  * boundary bits like EXTENT_LOCKED.
1146  *
1147  * All allocations are done with GFP_NOFS.
1148  */
1149 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1150                        unsigned bits, unsigned clear_bits,
1151                        struct extent_state **cached_state)
1152 {
1153         struct extent_state *state;
1154         struct extent_state *prealloc = NULL;
1155         struct rb_node *node;
1156         struct rb_node **p;
1157         struct rb_node *parent;
1158         int err = 0;
1159         u64 last_start;
1160         u64 last_end;
1161         bool first_iteration = true;
1162
1163         btrfs_debug_check_extent_io_range(tree, start, end);
1164         trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1165                                        clear_bits);
1166
1167 again:
1168         if (!prealloc) {
1169                 /*
1170                  * Best effort, don't worry if extent state allocation fails
1171                  * here for the first iteration. We might have a cached state
1172                  * that matches exactly the target range, in which case no
1173                  * extent state allocations are needed. We'll only know this
1174                  * after locking the tree.
1175                  */
1176                 prealloc = alloc_extent_state(GFP_NOFS);
1177                 if (!prealloc && !first_iteration)
1178                         return -ENOMEM;
1179         }
1180
1181         spin_lock(&tree->lock);
1182         if (cached_state && *cached_state) {
1183                 state = *cached_state;
1184                 if (state->start <= start && state->end > start &&
1185                     extent_state_in_tree(state)) {
1186                         node = &state->rb_node;
1187                         goto hit_next;
1188                 }
1189         }
1190
1191         /*
1192          * this search will find all the extents that end after
1193          * our range starts.
1194          */
1195         node = tree_search_for_insert(tree, start, &p, &parent);
1196         if (!node) {
1197                 prealloc = alloc_extent_state_atomic(prealloc);
1198                 if (!prealloc) {
1199                         err = -ENOMEM;
1200                         goto out;
1201                 }
1202                 err = insert_state(tree, prealloc, start, end,
1203                                    &p, &parent, &bits, NULL);
1204                 if (err)
1205                         extent_io_tree_panic(tree, err);
1206                 cache_state(prealloc, cached_state);
1207                 prealloc = NULL;
1208                 goto out;
1209         }
1210         state = rb_entry(node, struct extent_state, rb_node);
1211 hit_next:
1212         last_start = state->start;
1213         last_end = state->end;
1214
1215         /*
1216          * | ---- desired range ---- |
1217          * | state |
1218          *
1219          * Just lock what we found and keep going
1220          */
1221         if (state->start == start && state->end <= end) {
1222                 set_state_bits(tree, state, &bits, NULL);
1223                 cache_state(state, cached_state);
1224                 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1225                 if (last_end == (u64)-1)
1226                         goto out;
1227                 start = last_end + 1;
1228                 if (start < end && state && state->start == start &&
1229                     !need_resched())
1230                         goto hit_next;
1231                 goto search_again;
1232         }
1233
1234         /*
1235          *     | ---- desired range ---- |
1236          * | state |
1237          *   or
1238          * | ------------- state -------------- |
1239          *
1240          * We need to split the extent we found, and may flip bits on
1241          * second half.
1242          *
1243          * If the extent we found extends past our
1244          * range, we just split and search again.  It'll get split
1245          * again the next time though.
1246          *
1247          * If the extent we found is inside our range, we set the
1248          * desired bit on it.
1249          */
1250         if (state->start < start) {
1251                 prealloc = alloc_extent_state_atomic(prealloc);
1252                 if (!prealloc) {
1253                         err = -ENOMEM;
1254                         goto out;
1255                 }
1256                 err = split_state(tree, state, prealloc, start);
1257                 if (err)
1258                         extent_io_tree_panic(tree, err);
1259                 prealloc = NULL;
1260                 if (err)
1261                         goto out;
1262                 if (state->end <= end) {
1263                         set_state_bits(tree, state, &bits, NULL);
1264                         cache_state(state, cached_state);
1265                         state = clear_state_bit(tree, state, &clear_bits, 0,
1266                                                 NULL);
1267                         if (last_end == (u64)-1)
1268                                 goto out;
1269                         start = last_end + 1;
1270                         if (start < end && state && state->start == start &&
1271                             !need_resched())
1272                                 goto hit_next;
1273                 }
1274                 goto search_again;
1275         }
1276         /*
1277          * | ---- desired range ---- |
1278          *     | state | or               | state |
1279          *
1280          * There's a hole, we need to insert something in it and
1281          * ignore the extent we found.
1282          */
1283         if (state->start > start) {
1284                 u64 this_end;
1285                 if (end < last_start)
1286                         this_end = end;
1287                 else
1288                         this_end = last_start - 1;
1289
1290                 prealloc = alloc_extent_state_atomic(prealloc);
1291                 if (!prealloc) {
1292                         err = -ENOMEM;
1293                         goto out;
1294                 }
1295
1296                 /*
1297                  * Avoid freeing 'prealloc' if it can be merged with
1298                  * the later extent.
1299                  */
1300                 err = insert_state(tree, prealloc, start, this_end,
1301                                    NULL, NULL, &bits, NULL);
1302                 if (err)
1303                         extent_io_tree_panic(tree, err);
1304                 cache_state(prealloc, cached_state);
1305                 prealloc = NULL;
1306                 start = this_end + 1;
1307                 goto search_again;
1308         }
1309         /*
1310          * | ---- desired range ---- |
1311          *                        | state |
1312          * We need to split the extent, and set the bit
1313          * on the first half
1314          */
1315         if (state->start <= end && state->end > end) {
1316                 prealloc = alloc_extent_state_atomic(prealloc);
1317                 if (!prealloc) {
1318                         err = -ENOMEM;
1319                         goto out;
1320                 }
1321
1322                 err = split_state(tree, state, prealloc, end + 1);
1323                 if (err)
1324                         extent_io_tree_panic(tree, err);
1325
1326                 set_state_bits(tree, prealloc, &bits, NULL);
1327                 cache_state(prealloc, cached_state);
1328                 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1329                 prealloc = NULL;
1330                 goto out;
1331         }
1332
1333 search_again:
1334         if (start > end)
1335                 goto out;
1336         spin_unlock(&tree->lock);
1337         cond_resched();
1338         first_iteration = false;
1339         goto again;
1340
1341 out:
1342         spin_unlock(&tree->lock);
1343         if (prealloc)
1344                 free_extent_state(prealloc);
1345
1346         return err;
1347 }
1348
1349 /* wrappers around set/clear extent bit */
1350 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1351                            unsigned bits, struct extent_changeset *changeset)
1352 {
1353         /*
1354          * We don't support EXTENT_LOCKED yet, as current changeset will
1355          * record any bits changed, so for EXTENT_LOCKED case, it will
1356          * either fail with -EEXIST or changeset will record the whole
1357          * range.
1358          */
1359         BUG_ON(bits & EXTENT_LOCKED);
1360
1361         return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1362                                 changeset);
1363 }
1364
1365 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1366                            unsigned bits)
1367 {
1368         return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1369                                 GFP_NOWAIT, NULL);
1370 }
1371
1372 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1373                      unsigned bits, int wake, int delete,
1374                      struct extent_state **cached)
1375 {
1376         return __clear_extent_bit(tree, start, end, bits, wake, delete,
1377                                   cached, GFP_NOFS, NULL);
1378 }
1379
1380 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1381                 unsigned bits, struct extent_changeset *changeset)
1382 {
1383         /*
1384          * Don't support EXTENT_LOCKED case, same reason as
1385          * set_record_extent_bits().
1386          */
1387         BUG_ON(bits & EXTENT_LOCKED);
1388
1389         return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1390                                   changeset);
1391 }
1392
1393 /*
1394  * Either insert or lock the state struct between start and end.  Waits until
1395  * any conflicting EXTENT_LOCKED range has been cleared before retrying.
1396  */
1397 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1398                      struct extent_state **cached_state)
1399 {
1400         int err;
1401         u64 failed_start;
1402
1403         while (1) {
1404                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1405                                        EXTENT_LOCKED, &failed_start,
1406                                        cached_state, GFP_NOFS, NULL);
1407                 if (err == -EEXIST) {
1408                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1409                         start = failed_start;
1410                 } else
1411                         break;
1412                 WARN_ON(start > end);
1413         }
1414         return err;
1415 }
1416
1417 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1418 {
1419         int err;
1420         u64 failed_start;
1421
1422         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1423                                &failed_start, NULL, GFP_NOFS, NULL);
1424         if (err == -EEXIST) {
1425                 if (failed_start > start)
1426                         clear_extent_bit(tree, start, failed_start - 1,
1427                                          EXTENT_LOCKED, 1, 0, NULL);
1428                 return 0;
1429         }
1430         return 1;
1431 }
1432
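/*
 * Clear the dirty-for-writeback flag on every page covering the byte range
 * [start, end].  All of the pages are expected to be in the page cache.
 */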
1433 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1434 {
1435         unsigned long index = start >> PAGE_SHIFT;
1436         unsigned long end_index = end >> PAGE_SHIFT;
1437         struct page *page;
1438
1439         while (index <= end_index) {
1440                 page = find_get_page(inode->i_mapping, index);
1441                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1442                 clear_page_dirty_for_io(page);
1443                 put_page(page);
1444                 index++;
1445         }
1446 }
1447
1448 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1449 {
1450         unsigned long index = start >> PAGE_SHIFT;
1451         unsigned long end_index = end >> PAGE_SHIFT;
1452         struct page *page;
1453
1454         while (index <= end_index) {
1455                 page = find_get_page(inode->i_mapping, index);
1456                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1457                 __set_page_dirty_nobuffers(page);
1458                 account_page_redirty(page);
1459                 put_page(page);
1460                 index++;
1461         }
1462 }
1463
1464 /* find the first state struct with 'bits' set after 'start', and
1465  * return it.  tree->lock must be held.  NULL will be returned if
1466  * nothing was found after 'start'
1467  */
1468 static struct extent_state *
1469 find_first_extent_bit_state(struct extent_io_tree *tree,
1470                             u64 start, unsigned bits)
1471 {
1472         struct rb_node *node;
1473         struct extent_state *state;
1474
1475         /*
1476          * this search will find all the extents that end after
1477          * our range starts.
1478          */
1479         node = tree_search(tree, start);
1480         if (!node)
1481                 goto out;
1482
1483         while (1) {
1484                 state = rb_entry(node, struct extent_state, rb_node);
1485                 if (state->end >= start && (state->state & bits))
1486                         return state;
1487
1488                 node = rb_next(node);
1489                 if (!node)
1490                         break;
1491         }
1492 out:
1493         return NULL;
1494 }
1495
1496 /*
1497  * find the first offset in the io tree with 'bits' set. zero is
1498  * returned if we find something, and *start_ret and *end_ret are
1499  * set to reflect the state struct that was found.
1500  *
1501  * If nothing was found, 1 is returned. If something was found, 0 is returned.
1502  */
1503 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1504                           u64 *start_ret, u64 *end_ret, unsigned bits,
1505                           struct extent_state **cached_state)
1506 {
1507         struct extent_state *state;
1508         int ret = 1;
1509
1510         spin_lock(&tree->lock);
1511         if (cached_state && *cached_state) {
1512                 state = *cached_state;
1513                 if (state->end == start - 1 && extent_state_in_tree(state)) {
1514                         while ((state = next_state(state)) != NULL) {
1515                                 if (state->state & bits)
1516                                         goto got_it;
1517                         }
1518                         free_extent_state(*cached_state);
1519                         *cached_state = NULL;
1520                         goto out;
1521                 }
1522                 free_extent_state(*cached_state);
1523                 *cached_state = NULL;
1524         }
1525
1526         state = find_first_extent_bit_state(tree, start, bits);
1527 got_it:
1528         if (state) {
1529                 cache_state_if_flags(state, cached_state, 0);
1530                 *start_ret = state->start;
1531                 *end_ret = state->end;
1532                 ret = 0;
1533         }
1534 out:
1535         spin_unlock(&tree->lock);
1536         return ret;
1537 }
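
/*
 * Illustrative sketch only: a hypothetical helper (not used anywhere in
 * btrfs) that walks every range with a given bit set by calling
 * find_first_extent_bit() in a loop.  The cached state is reused between
 * iterations so the common case avoids a full tree search.
 */
static inline u64 example_sum_bit_ranges_sketch(struct extent_io_tree *tree,
                                                unsigned bits)
{
        struct extent_state *cached = NULL;
        u64 start = 0;
        u64 found_start;
        u64 found_end;
        u64 bytes = 0;

        /* returns 0 while a matching range exists at or after 'start' */
        while (!find_first_extent_bit(tree, start, &found_start, &found_end,
                                      bits, &cached)) {
                bytes += found_end - found_start + 1;
                /* guard against wrapping past the end of the address space */
                if (found_end == (u64)-1)
                        break;
                start = found_end + 1;
        }
        free_extent_state(cached);
        return bytes;
}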
1538
1539 /**
1540  * find_first_clear_extent_bit - find the first range that has @bits not set.
1541  * This range could start before @start.
1542  *
1543  * @tree - the tree to search
1544  * @start - the offset at/after which the found extent should start
1545  * @start_ret - records the beginning of the range
1546  * @end_ret - records the end of the range (inclusive)
1547  * @bits - the set of bits which must be unset
1548  *
1549  * Since an unallocated range is also considered one which doesn't have the
1550  * bits set, it's possible that @end_ret contains -1; this happens when the
1551  * range spans (last_range_end, end of device]. In this case it's up to the
1552  * caller to trim @end_ret to the appropriate size.
1553  */
1554 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1555                                  u64 *start_ret, u64 *end_ret, unsigned bits)
1556 {
1557         struct extent_state *state;
1558         struct rb_node *node, *prev = NULL, *next;
1559
1560         spin_lock(&tree->lock);
1561
1562         /* Find first extent with bits cleared */
1563         while (1) {
1564                 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1565                 if (!node) {
1566                         node = next;
1567                         if (!node) {
1568                                 /*
1569                                  * We are past the last allocated chunk,
1570                                  * set start at the end of the last extent. The
1571                                  * device alloc tree should never be empty so
1572                                  * prev is always set.
1573                                  */
1574                                 ASSERT(prev);
1575                                 state = rb_entry(prev, struct extent_state, rb_node);
1576                                 *start_ret = state->end + 1;
1577                                 *end_ret = -1;
1578                                 goto out;
1579                         }
1580                 }
1581                 /*
1582                  * At this point 'node' either contains 'start' or start is
1583                  * before 'node'
1584                  */
1585                 state = rb_entry(node, struct extent_state, rb_node);
1586
1587                 if (in_range(start, state->start, state->end - state->start + 1)) {
1588                         if (state->state & bits) {
1589                                 /*
1590                                  * |--range with bits sets--|
1591                                  *    |
1592                                  *    start
1593                                  */
1594                                 start = state->end + 1;
1595                         } else {
1596                                 /*
1597                                  * 'start' falls within a range that doesn't
1598                                  * have the bits set, so take its start as
1599                                  * the beginning of the desired range
1600                                  *
1601                                  * |--range with bits cleared----|
1602                                  *      |
1603                                  *      start
1604                                  */
1605                                 *start_ret = state->start;
1606                                 break;
1607                         }
1608                 } else {
1609                         /*
1610                          * |---prev range---|---hole/unset---|---node range---|
1611                          *                          |
1612                          *                        start
1613                          *
1614                          *                        or
1615                          *
1616                          * |---hole/unset--||--first node--|
1617                          * 0   |
1618                          *    start
1619                          */
1620                         if (prev) {
1621                                 state = rb_entry(prev, struct extent_state,
1622                                                  rb_node);
1623                                 *start_ret = state->end + 1;
1624                         } else {
1625                                 *start_ret = 0;
1626                         }
1627                         break;
1628                 }
1629         }
1630
1631         /*
1632          * Find the longest stretch from start until an entry which has the
1633          * bits set
1634          */
1635         while (1) {
1636                 state = rb_entry(node, struct extent_state, rb_node);
1637                 if (state->end >= start && !(state->state & bits)) {
1638                         *end_ret = state->end;
1639                 } else {
1640                         *end_ret = state->start - 1;
1641                         break;
1642                 }
1643
1644                 node = rb_next(node);
1645                 if (!node)
1646                         break;
1647         }
1648 out:
1649         spin_unlock(&tree->lock);
1650 }
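
/*
 * Hedged sketch of the caller-side trimming described in the comment above;
 * the helper is hypothetical and used purely as an example.  When the clear
 * range runs past the last allocated chunk, @end_ret comes back as -1 and
 * the caller clamps it to whatever device size it is working with.
 */
static inline void example_find_clear_range_sketch(struct extent_io_tree *tree,
                                                   u64 start, u64 device_size,
                                                   unsigned bits,
                                                   u64 *range_start,
                                                   u64 *range_end)
{
        find_first_clear_extent_bit(tree, start, range_start, range_end, bits);

        /* (u64)-1 means "to the end of the device"; trim to the real size */
        if (*range_end == (u64)-1)
                *range_end = device_size - 1;
}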
1651
1652 /*
1653  * find a contiguous range of bytes in the file marked as delalloc, not
1654  * more than 'max_bytes'.  start and end are used to return the range.
1655  *
1656  * true is returned if we find something, false if nothing was in the tree
1657  */
1658 static noinline bool find_delalloc_range(struct extent_io_tree *tree,
1659                                         u64 *start, u64 *end, u64 max_bytes,
1660                                         struct extent_state **cached_state)
1661 {
1662         struct rb_node *node;
1663         struct extent_state *state;
1664         u64 cur_start = *start;
1665         bool found = false;
1666         u64 total_bytes = 0;
1667
1668         spin_lock(&tree->lock);
1669
1670         /*
1671          * this search will find all the extents that end after
1672          * our range starts.
1673          */
1674         node = tree_search(tree, cur_start);
1675         if (!node) {
1676                 *end = (u64)-1;
1677                 goto out;
1678         }
1679
1680         while (1) {
1681                 state = rb_entry(node, struct extent_state, rb_node);
1682                 if (found && (state->start != cur_start ||
1683                               (state->state & EXTENT_BOUNDARY))) {
1684                         goto out;
1685                 }
1686                 if (!(state->state & EXTENT_DELALLOC)) {
1687                         if (!found)
1688                                 *end = state->end;
1689                         goto out;
1690                 }
1691                 if (!found) {
1692                         *start = state->start;
1693                         *cached_state = state;
1694                         refcount_inc(&state->refs);
1695                 }
1696                 found = true;
1697                 *end = state->end;
1698                 cur_start = state->end + 1;
1699                 node = rb_next(node);
1700                 total_bytes += state->end - state->start + 1;
1701                 if (total_bytes >= max_bytes)
1702                         break;
1703                 if (!node)
1704                         break;
1705         }
1706 out:
1707         spin_unlock(&tree->lock);
1708         return found;
1709 }
1710
1711 static int __process_pages_contig(struct address_space *mapping,
1712                                   struct page *locked_page,
1713                                   pgoff_t start_index, pgoff_t end_index,
1714                                   unsigned long page_ops, pgoff_t *index_ret);
1715
1716 static noinline void __unlock_for_delalloc(struct inode *inode,
1717                                            struct page *locked_page,
1718                                            u64 start, u64 end)
1719 {
1720         unsigned long index = start >> PAGE_SHIFT;
1721         unsigned long end_index = end >> PAGE_SHIFT;
1722
1723         ASSERT(locked_page);
1724         if (index == locked_page->index && end_index == index)
1725                 return;
1726
1727         __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1728                                PAGE_UNLOCK, NULL);
1729 }
1730
1731 static noinline int lock_delalloc_pages(struct inode *inode,
1732                                         struct page *locked_page,
1733                                         u64 delalloc_start,
1734                                         u64 delalloc_end)
1735 {
1736         unsigned long index = delalloc_start >> PAGE_SHIFT;
1737         unsigned long index_ret = index;
1738         unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1739         int ret;
1740
1741         ASSERT(locked_page);
1742         if (index == locked_page->index && index == end_index)
1743                 return 0;
1744
1745         ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1746                                      end_index, PAGE_LOCK, &index_ret);
1747         if (ret == -EAGAIN)
1748                 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1749                                       (u64)index_ret << PAGE_SHIFT);
1750         return ret;
1751 }
1752
1753 /*
1754  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1755  * more than @max_bytes.  @start and @end are used to return the range.
1756  *
1757  * Return: true if we find something
1758  *         false if nothing was in the tree
1759  */
1760 EXPORT_FOR_TESTS
1761 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1762                                     struct extent_io_tree *tree,
1763                                     struct page *locked_page, u64 *start,
1764                                     u64 *end)
1765 {
1766         u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1767         u64 delalloc_start;
1768         u64 delalloc_end;
1769         bool found;
1770         struct extent_state *cached_state = NULL;
1771         int ret;
1772         int loops = 0;
1773
1774 again:
1775         /* step one, find a bunch of delalloc bytes starting at start */
1776         delalloc_start = *start;
1777         delalloc_end = 0;
1778         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1779                                     max_bytes, &cached_state);
1780         if (!found || delalloc_end <= *start) {
1781                 *start = delalloc_start;
1782                 *end = delalloc_end;
1783                 free_extent_state(cached_state);
1784                 return false;
1785         }
1786
1787         /*
1788          * start comes from the offset of locked_page.  We have to lock
1789          * pages in order, so we can't process delalloc bytes before
1790          * locked_page
1791          */
1792         if (delalloc_start < *start)
1793                 delalloc_start = *start;
1794
1795         /*
1796          * make sure to limit the number of pages we try to lock down
1797          */
1798         if (delalloc_end + 1 - delalloc_start > max_bytes)
1799                 delalloc_end = delalloc_start + max_bytes - 1;
1800
1801         /* step two, lock all the pages after the page that has start */
1802         ret = lock_delalloc_pages(inode, locked_page,
1803                                   delalloc_start, delalloc_end);
1804         ASSERT(!ret || ret == -EAGAIN);
1805         if (ret == -EAGAIN) {
1806                 /* some of the pages are gone, let's avoid looping by
1807                  * shortening the size of the delalloc range we're searching
1808                  */
1809                 free_extent_state(cached_state);
1810                 cached_state = NULL;
1811                 if (!loops) {
1812                         max_bytes = PAGE_SIZE;
1813                         loops = 1;
1814                         goto again;
1815                 } else {
1816                         found = false;
1817                         goto out_failed;
1818                 }
1819         }
1820
1821         /* step three, lock the state bits for the whole range */
1822         lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1823
1824         /* then test to make sure it is all still delalloc */
1825         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1826                              EXTENT_DELALLOC, 1, cached_state);
1827         if (!ret) {
1828                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1829                                      &cached_state);
1830                 __unlock_for_delalloc(inode, locked_page,
1831                               delalloc_start, delalloc_end);
1832                 cond_resched();
1833                 goto again;
1834         }
1835         free_extent_state(cached_state);
1836         *start = delalloc_start;
1837         *end = delalloc_end;
1838 out_failed:
1839         return found;
1840 }
1841
1842 static int __process_pages_contig(struct address_space *mapping,
1843                                   struct page *locked_page,
1844                                   pgoff_t start_index, pgoff_t end_index,
1845                                   unsigned long page_ops, pgoff_t *index_ret)
1846 {
1847         unsigned long nr_pages = end_index - start_index + 1;
1848         unsigned long pages_locked = 0;
1849         pgoff_t index = start_index;
1850         struct page *pages[16];
1851         unsigned ret;
1852         int err = 0;
1853         int i;
1854
1855         if (page_ops & PAGE_LOCK) {
1856                 ASSERT(page_ops == PAGE_LOCK);
1857                 ASSERT(index_ret && *index_ret == start_index);
1858         }
1859
1860         if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1861                 mapping_set_error(mapping, -EIO);
1862
1863         while (nr_pages > 0) {
1864                 ret = find_get_pages_contig(mapping, index,
1865                                      min_t(unsigned long,
1866                                      nr_pages, ARRAY_SIZE(pages)), pages);
1867                 if (ret == 0) {
1868                         /*
1869                          * Only if we're going to lock these pages,
1870                          * can we find nothing at @index.
1871                          */
1872                         ASSERT(page_ops & PAGE_LOCK);
1873                         err = -EAGAIN;
1874                         goto out;
1875                 }
1876
1877                 for (i = 0; i < ret; i++) {
1878                         if (page_ops & PAGE_SET_PRIVATE2)
1879                                 SetPagePrivate2(pages[i]);
1880
1881                         if (pages[i] == locked_page) {
1882                                 put_page(pages[i]);
1883                                 pages_locked++;
1884                                 continue;
1885                         }
1886                         if (page_ops & PAGE_CLEAR_DIRTY)
1887                                 clear_page_dirty_for_io(pages[i]);
1888                         if (page_ops & PAGE_SET_WRITEBACK)
1889                                 set_page_writeback(pages[i]);
1890                         if (page_ops & PAGE_SET_ERROR)
1891                                 SetPageError(pages[i]);
1892                         if (page_ops & PAGE_END_WRITEBACK)
1893                                 end_page_writeback(pages[i]);
1894                         if (page_ops & PAGE_UNLOCK)
1895                                 unlock_page(pages[i]);
1896                         if (page_ops & PAGE_LOCK) {
1897                                 lock_page(pages[i]);
1898                                 if (!PageDirty(pages[i]) ||
1899                                     pages[i]->mapping != mapping) {
1900                                         unlock_page(pages[i]);
1901                                         put_page(pages[i]);
1902                                         err = -EAGAIN;
1903                                         goto out;
1904                                 }
1905                         }
1906                         put_page(pages[i]);
1907                         pages_locked++;
1908                 }
1909                 nr_pages -= ret;
1910                 index += ret;
1911                 cond_resched();
1912         }
1913 out:
1914         if (err && index_ret)
1915                 *index_ret = start_index + pages_locked - 1;
1916         return err;
1917 }
1918
1919 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1920                                  u64 delalloc_end, struct page *locked_page,
1921                                  unsigned clear_bits,
1922                                  unsigned long page_ops)
1923 {
1924         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
1925                          NULL);
1926
1927         __process_pages_contig(inode->i_mapping, locked_page,
1928                                start >> PAGE_SHIFT, end >> PAGE_SHIFT,
1929                                page_ops, NULL);
1930 }
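
/*
 * Illustrative sketch, assuming an error path that has to abandon a delalloc
 * range; the helper is hypothetical and the particular clear_bits/page_ops
 * combination is only an example.  It shows how extent_clear_unlock_delalloc()
 * clears extent state and finishes every page of the range in a single call.
 */
static inline void example_cleanup_delalloc_sketch(struct inode *inode,
                                                   struct page *locked_page,
                                                   u64 start, u64 end)
{
        extent_clear_unlock_delalloc(inode, start, end, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DELALLOC,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
}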
1931
1932 /*
1933  * count the number of bytes in the tree that have a given bit(s)
1934  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1935  * cached.  The total number found is returned.
1936  */
1937 u64 count_range_bits(struct extent_io_tree *tree,
1938                      u64 *start, u64 search_end, u64 max_bytes,
1939                      unsigned bits, int contig)
1940 {
1941         struct rb_node *node;
1942         struct extent_state *state;
1943         u64 cur_start = *start;
1944         u64 total_bytes = 0;
1945         u64 last = 0;
1946         int found = 0;
1947
1948         if (WARN_ON(search_end <= cur_start))
1949                 return 0;
1950
1951         spin_lock(&tree->lock);
1952         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1953                 total_bytes = tree->dirty_bytes;
1954                 goto out;
1955         }
1956         /*
1957          * this search will find all the extents that end after
1958          * our range starts.
1959          */
1960         node = tree_search(tree, cur_start);
1961         if (!node)
1962                 goto out;
1963
1964         while (1) {
1965                 state = rb_entry(node, struct extent_state, rb_node);
1966                 if (state->start > search_end)
1967                         break;
1968                 if (contig && found && state->start > last + 1)
1969                         break;
1970                 if (state->end >= cur_start && (state->state & bits) == bits) {
1971                         total_bytes += min(search_end, state->end) + 1 -
1972                                        max(cur_start, state->start);
1973                         if (total_bytes >= max_bytes)
1974                                 break;
1975                         if (!found) {
1976                                 *start = max(cur_start, state->start);
1977                                 found = 1;
1978                         }
1979                         last = state->end;
1980                 } else if (contig && found) {
1981                         break;
1982                 }
1983                 node = rb_next(node);
1984                 if (!node)
1985                         break;
1986         }
1987 out:
1988         spin_unlock(&tree->lock);
1989         return total_bytes;
1990 }
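
/*
 * Minimal sketch (hypothetical helper, only an example): count how many bytes
 * in [start, end] carry a given bit.  Note that count_range_bits() warns and
 * returns 0 when search_end <= *start, and that start == 0 with EXTENT_DIRTY
 * takes the cached tree->dirty_bytes shortcut instead of walking the tree.
 */
static inline u64 example_count_bit_bytes_sketch(struct extent_io_tree *tree,
                                                 u64 start, u64 end,
                                                 unsigned bits)
{
        u64 cur = start;

        return count_range_bits(tree, &cur, end, (u64)-1, bits, 0);
}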
1991
1992 /*
1993  * set the private field for a given byte offset in the tree.  If there isn't
1994  * an extent_state with that exact start offset, -ENOENT is returned.
1995  */
1996 static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1997                 struct io_failure_record *failrec)
1998 {
1999         struct rb_node *node;
2000         struct extent_state *state;
2001         int ret = 0;
2002
2003         spin_lock(&tree->lock);
2004         /*
2005          * this search will find all the extents that end after
2006          * our range starts.
2007          */
2008         node = tree_search(tree, start);
2009         if (!node) {
2010                 ret = -ENOENT;
2011                 goto out;
2012         }
2013         state = rb_entry(node, struct extent_state, rb_node);
2014         if (state->start != start) {
2015                 ret = -ENOENT;
2016                 goto out;
2017         }
2018         state->failrec = failrec;
2019 out:
2020         spin_unlock(&tree->lock);
2021         return ret;
2022 }
2023
2024 static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
2025                 struct io_failure_record **failrec)
2026 {
2027         struct rb_node *node;
2028         struct extent_state *state;
2029         int ret = 0;
2030
2031         spin_lock(&tree->lock);
2032         /*
2033          * this search will find all the extents that end after
2034          * our range starts.
2035          */
2036         node = tree_search(tree, start);
2037         if (!node) {
2038                 ret = -ENOENT;
2039                 goto out;
2040         }
2041         state = rb_entry(node, struct extent_state, rb_node);
2042         if (state->start != start) {
2043                 ret = -ENOENT;
2044                 goto out;
2045         }
2046         *failrec = state->failrec;
2047 out:
2048         spin_unlock(&tree->lock);
2049         return ret;
2050 }
2051
2052 /*
2053  * searches a range in the state tree for a given mask.
2054  * If 'filled' == 1, this returns 1 only if the whole range is covered by
2055  * extent states that have the bits set.  Otherwise, 1 is returned if any
2056  * bit in the range is found set.
2057  */
2058 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2059                    unsigned bits, int filled, struct extent_state *cached)
2060 {
2061         struct extent_state *state = NULL;
2062         struct rb_node *node;
2063         int bitset = 0;
2064
2065         spin_lock(&tree->lock);
2066         if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2067             cached->end > start)
2068                 node = &cached->rb_node;
2069         else
2070                 node = tree_search(tree, start);
2071         while (node && start <= end) {
2072                 state = rb_entry(node, struct extent_state, rb_node);
2073
2074                 if (filled && state->start > start) {
2075                         bitset = 0;
2076                         break;
2077                 }
2078
2079                 if (state->start > end)
2080                         break;
2081
2082                 if (state->state & bits) {
2083                         bitset = 1;
2084                         if (!filled)
2085                                 break;
2086                 } else if (filled) {
2087                         bitset = 0;
2088                         break;
2089                 }
2090
2091                 if (state->end == (u64)-1)
2092                         break;
2093
2094                 start = state->end + 1;
2095                 if (start > end)
2096                         break;
2097                 node = rb_next(node);
2098                 if (!node) {
2099                         if (filled)
2100                                 bitset = 0;
2101                         break;
2102                 }
2103         }
2104         spin_unlock(&tree->lock);
2105         return bitset;
2106 }
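
/*
 * Hedged sketch of the two 'filled' modes (hypothetical helpers, used only as
 * examples): with filled == 1 the whole range must carry the bit, with
 * filled == 0 a single byte with the bit set is enough.
 */
static inline bool example_range_fully_dirty_sketch(struct extent_io_tree *tree,
                                                    u64 start, u64 end)
{
        return test_range_bit(tree, start, end, EXTENT_DIRTY, 1, NULL);
}

static inline bool example_range_has_dirty_sketch(struct extent_io_tree *tree,
                                                  u64 start, u64 end)
{
        return test_range_bit(tree, start, end, EXTENT_DIRTY, 0, NULL);
}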
2107
2108 /*
2109  * helper function to set a given page up to date if all the
2110  * extents in the tree for that page are up to date
2111  */
2112 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2113 {
2114         u64 start = page_offset(page);
2115         u64 end = start + PAGE_SIZE - 1;
2116         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2117                 SetPageUptodate(page);
2118 }
2119
2120 int free_io_failure(struct extent_io_tree *failure_tree,
2121                     struct extent_io_tree *io_tree,
2122                     struct io_failure_record *rec)
2123 {
2124         int ret;
2125         int err = 0;
2126
2127         set_state_failrec(failure_tree, rec->start, NULL);
2128         ret = clear_extent_bits(failure_tree, rec->start,
2129                                 rec->start + rec->len - 1,
2130                                 EXTENT_LOCKED | EXTENT_DIRTY);
2131         if (ret)
2132                 err = ret;
2133
2134         ret = clear_extent_bits(io_tree, rec->start,
2135                                 rec->start + rec->len - 1,
2136                                 EXTENT_DAMAGED);
2137         if (ret && !err)
2138                 err = ret;
2139
2140         kfree(rec);
2141         return err;
2142 }
2143
2144 /*
2145  * this bypasses the standard btrfs submit functions deliberately, as
2146  * the standard behavior is to write all copies in a raid setup. here we only
2147  * want to write the one bad copy. so we do the mapping for ourselves and issue
2148  * submit_bio directly.
2149  * to avoid any synchronization issues, wait for the data after writing, which
2150  * actually prevents the read that triggered the error from finishing.
2151  * currently, there can be no more than two copies of every data bit. thus,
2152  * exactly one rewrite is required.
2153  */
2154 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2155                       u64 length, u64 logical, struct page *page,
2156                       unsigned int pg_offset, int mirror_num)
2157 {
2158         struct bio *bio;
2159         struct btrfs_device *dev;
2160         u64 map_length = 0;
2161         u64 sector;
2162         struct btrfs_bio *bbio = NULL;
2163         int ret;
2164
2165         ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2166         BUG_ON(!mirror_num);
2167
2168         bio = btrfs_io_bio_alloc(1);
2169         bio->bi_iter.bi_size = 0;
2170         map_length = length;
2171
2172         /*
2173          * Avoid races with device replace and make sure our bbio has devices
2174          * associated to its stripes that don't go away while we are doing the
2175          * read repair operation.
2176          */
2177         btrfs_bio_counter_inc_blocked(fs_info);
2178         if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2179                 /*
2180                  * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2181                  * to update all raid stripes, but here we just want to correct
2182                  * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2183                  * stripe's dev and sector.
2184                  */
2185                 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2186                                       &map_length, &bbio, 0);
2187                 if (ret) {
2188                         btrfs_bio_counter_dec(fs_info);
2189                         bio_put(bio);
2190                         return -EIO;
2191                 }
2192                 ASSERT(bbio->mirror_num == 1);
2193         } else {
2194                 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2195                                       &map_length, &bbio, mirror_num);
2196                 if (ret) {
2197                         btrfs_bio_counter_dec(fs_info);
2198                         bio_put(bio);
2199                         return -EIO;
2200                 }
2201                 BUG_ON(mirror_num != bbio->mirror_num);
2202         }
2203
2204         sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2205         bio->bi_iter.bi_sector = sector;
2206         dev = bbio->stripes[bbio->mirror_num - 1].dev;
2207         btrfs_put_bbio(bbio);
2208         if (!dev || !dev->bdev ||
2209             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2210                 btrfs_bio_counter_dec(fs_info);
2211                 bio_put(bio);
2212                 return -EIO;
2213         }
2214         bio_set_dev(bio, dev->bdev);
2215         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2216         bio_add_page(bio, page, length, pg_offset);
2217
2218         if (btrfsic_submit_bio_wait(bio)) {
2219                 /* try to remap that extent elsewhere? */
2220                 btrfs_bio_counter_dec(fs_info);
2221                 bio_put(bio);
2222                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2223                 return -EIO;
2224         }
2225
2226         btrfs_info_rl_in_rcu(fs_info,
2227                 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2228                                   ino, start,
2229                                   rcu_str_deref(dev->name), sector);
2230         btrfs_bio_counter_dec(fs_info);
2231         bio_put(bio);
2232         return 0;
2233 }
2234
2235 int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num)
2236 {
2237         struct btrfs_fs_info *fs_info = eb->fs_info;
2238         u64 start = eb->start;
2239         int i, num_pages = num_extent_pages(eb);
2240         int ret = 0;
2241
2242         if (sb_rdonly(fs_info->sb))
2243                 return -EROFS;
2244
2245         for (i = 0; i < num_pages; i++) {
2246                 struct page *p = eb->pages[i];
2247
2248                 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2249                                         start - page_offset(p), mirror_num);
2250                 if (ret)
2251                         break;
2252                 start += PAGE_SIZE;
2253         }
2254
2255         return ret;
2256 }
2257
2258 /*
2259  * each time an IO finishes, we do a fast check in the IO failure tree
2260  * to see if we need to process or clean up an io_failure_record
2261  */
2262 int clean_io_failure(struct btrfs_fs_info *fs_info,
2263                      struct extent_io_tree *failure_tree,
2264                      struct extent_io_tree *io_tree, u64 start,
2265                      struct page *page, u64 ino, unsigned int pg_offset)
2266 {
2267         u64 private;
2268         struct io_failure_record *failrec;
2269         struct extent_state *state;
2270         int num_copies;
2271         int ret;
2272
2273         private = 0;
2274         ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2275                                EXTENT_DIRTY, 0);
2276         if (!ret)
2277                 return 0;
2278
2279         ret = get_state_failrec(failure_tree, start, &failrec);
2280         if (ret)
2281                 return 0;
2282
2283         BUG_ON(!failrec->this_mirror);
2284
2285         if (failrec->in_validation) {
2286                 /* there was no real error, just free the record */
2287                 btrfs_debug(fs_info,
2288                         "clean_io_failure: freeing dummy error at %llu",
2289                         failrec->start);
2290                 goto out;
2291         }
2292         if (sb_rdonly(fs_info->sb))
2293                 goto out;
2294
2295         spin_lock(&io_tree->lock);
2296         state = find_first_extent_bit_state(io_tree,
2297                                             failrec->start,
2298                                             EXTENT_LOCKED);
2299         spin_unlock(&io_tree->lock);
2300
2301         if (state && state->start <= failrec->start &&
2302             state->end >= failrec->start + failrec->len - 1) {
2303                 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2304                                               failrec->len);
2305                 if (num_copies > 1)  {
2306                         repair_io_failure(fs_info, ino, start, failrec->len,
2307                                           failrec->logical, page, pg_offset,
2308                                           failrec->failed_mirror);
2309                 }
2310         }
2311
2312 out:
2313         free_io_failure(failure_tree, io_tree, failrec);
2314
2315         return 0;
2316 }
2317
2318 /*
2319  * Can be called when
2320  * - hold extent lock
2321  * - under ordered extent
2322  * - the inode is freeing
2323  */
2324 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2325 {
2326         struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2327         struct io_failure_record *failrec;
2328         struct extent_state *state, *next;
2329
2330         if (RB_EMPTY_ROOT(&failure_tree->state))
2331                 return;
2332
2333         spin_lock(&failure_tree->lock);
2334         state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2335         while (state) {
2336                 if (state->start > end)
2337                         break;
2338
2339                 ASSERT(state->end <= end);
2340
2341                 next = next_state(state);
2342
2343                 failrec = state->failrec;
2344                 free_extent_state(state);
2345                 kfree(failrec);
2346
2347                 state = next;
2348         }
2349         spin_unlock(&failure_tree->lock);
2350 }
2351
2352 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2353                 struct io_failure_record **failrec_ret)
2354 {
2355         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2356         struct io_failure_record *failrec;
2357         struct extent_map *em;
2358         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2359         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2360         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2361         int ret;
2362         u64 logical;
2363
2364         ret = get_state_failrec(failure_tree, start, &failrec);
2365         if (ret) {
2366                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2367                 if (!failrec)
2368                         return -ENOMEM;
2369
2370                 failrec->start = start;
2371                 failrec->len = end - start + 1;
2372                 failrec->this_mirror = 0;
2373                 failrec->bio_flags = 0;
2374                 failrec->in_validation = 0;
2375
2376                 read_lock(&em_tree->lock);
2377                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2378                 if (!em) {
2379                         read_unlock(&em_tree->lock);
2380                         kfree(failrec);
2381                         return -EIO;
2382                 }
2383
2384                 if (em->start > start || em->start + em->len <= start) {
2385                         free_extent_map(em);
2386                         em = NULL;
2387                 }
2388                 read_unlock(&em_tree->lock);
2389                 if (!em) {
2390                         kfree(failrec);
2391                         return -EIO;
2392                 }
2393
2394                 logical = start - em->start;
2395                 logical = em->block_start + logical;
2396                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2397                         logical = em->block_start;
2398                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2399                         extent_set_compress_type(&failrec->bio_flags,
2400                                                  em->compress_type);
2401                 }
2402
2403                 btrfs_debug(fs_info,
2404                         "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2405                         logical, start, failrec->len);
2406
2407                 failrec->logical = logical;
2408                 free_extent_map(em);
2409
2410                 /* set the bits in the private failure tree */
2411                 ret = set_extent_bits(failure_tree, start, end,
2412                                         EXTENT_LOCKED | EXTENT_DIRTY);
2413                 if (ret >= 0)
2414                         ret = set_state_failrec(failure_tree, start, failrec);
2415                 /* set the bits in the inode's tree */
2416                 if (ret >= 0)
2417                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2418                 if (ret < 0) {
2419                         kfree(failrec);
2420                         return ret;
2421                 }
2422         } else {
2423                 btrfs_debug(fs_info,
2424                         "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2425                         failrec->logical, failrec->start, failrec->len,
2426                         failrec->in_validation);
2427                 /*
2428                  * when data can be on disk more than twice, add to failrec here
2429                  * (e.g. with a list for failed_mirror) to make
2430                  * clean_io_failure() clean all those errors at once.
2431                  */
2432         }
2433
2434         *failrec_ret = failrec;
2435
2436         return 0;
2437 }
2438
2439 bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
2440                            struct io_failure_record *failrec, int failed_mirror)
2441 {
2442         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2443         int num_copies;
2444
2445         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2446         if (num_copies == 1) {
2447                 /*
2448                  * we only have a single copy of the data, so don't bother with
2449                  * all the retry and error correction code that follows. no
2450                  * matter what the error is, it is very likely to persist.
2451                  */
2452                 btrfs_debug(fs_info,
2453                         "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2454                         num_copies, failrec->this_mirror, failed_mirror);
2455                 return false;
2456         }
2457
2458         /*
2459          * there are two premises:
2460          *      a) deliver good data to the caller
2461          *      b) correct the bad sectors on disk
2462          */
2463         if (failed_bio_pages > 1) {
2464                 /*
2465                  * to fulfill b), we need to know the exact failing sectors, as
2466                  * we don't want to rewrite any more than the failed ones. thus,
2467                  * we need separate read requests for the failed bio
2468                  *
2469                  * if the following BUG_ON triggers, our validation request got
2470                  * merged. we need separate requests for our algorithm to work.
2471                  */
2472                 BUG_ON(failrec->in_validation);
2473                 failrec->in_validation = 1;
2474                 failrec->this_mirror = failed_mirror;
2475         } else {
2476                 /*
2477                  * we're ready to fulfill a) and b) alongside. get a good copy
2478                  * of the failed sector and if we succeed, we have setup
2479                  * everything for repair_io_failure to do the rest for us.
2480                  */
2481                 if (failrec->in_validation) {
2482                         BUG_ON(failrec->this_mirror != failed_mirror);
2483                         failrec->in_validation = 0;
2484                         failrec->this_mirror = 0;
2485                 }
2486                 failrec->failed_mirror = failed_mirror;
2487                 failrec->this_mirror++;
2488                 if (failrec->this_mirror == failed_mirror)
2489                         failrec->this_mirror++;
2490         }
2491
2492         if (failrec->this_mirror > num_copies) {
2493                 btrfs_debug(fs_info,
2494                         "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2495                         num_copies, failrec->this_mirror, failed_mirror);
2496                 return false;
2497         }
2498
2499         return true;
2500 }
2501
2502
2503 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2504                                     struct io_failure_record *failrec,
2505                                     struct page *page, int pg_offset, int icsum,
2506                                     bio_end_io_t *endio_func, void *data)
2507 {
2508         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2509         struct bio *bio;
2510         struct btrfs_io_bio *btrfs_failed_bio;
2511         struct btrfs_io_bio *btrfs_bio;
2512
2513         bio = btrfs_io_bio_alloc(1);
2514         bio->bi_end_io = endio_func;
2515         bio->bi_iter.bi_sector = failrec->logical >> 9;
2516         bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
2517         bio->bi_iter.bi_size = 0;
2518         bio->bi_private = data;
2519
2520         btrfs_failed_bio = btrfs_io_bio(failed_bio);
2521         if (btrfs_failed_bio->csum) {
2522                 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2523
2524                 btrfs_bio = btrfs_io_bio(bio);
2525                 btrfs_bio->csum = btrfs_bio->csum_inline;
2526                 icsum *= csum_size;
2527                 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2528                        csum_size);
2529         }
2530
2531         bio_add_page(bio, page, failrec->len, pg_offset);
2532
2533         return bio;
2534 }
2535
2536 /*
2537  * This is a generic handler for readpage errors. If other copies exist, read
2538  * those and write back good data to the failed position. It does not attempt
2539  * to remap the failed extent elsewhere, hoping the device will be smart
2540  * enough to do this as needed.
2541  */
2542 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2543                               struct page *page, u64 start, u64 end,
2544                               int failed_mirror)
2545 {
2546         struct io_failure_record *failrec;
2547         struct inode *inode = page->mapping->host;
2548         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2549         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2550         struct bio *bio;
2551         int read_mode = 0;
2552         blk_status_t status;
2553         int ret;
2554         unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT;
2555
2556         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2557
2558         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2559         if (ret)
2560                 return ret;
2561
2562         if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
2563                                     failed_mirror)) {
2564                 free_io_failure(failure_tree, tree, failrec);
2565                 return -EIO;
2566         }
2567
2568         if (failed_bio_pages > 1)
2569                 read_mode |= REQ_FAILFAST_DEV;
2570
2571         phy_offset >>= inode->i_sb->s_blocksize_bits;
2572         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2573                                       start - page_offset(page),
2574                                       (int)phy_offset, failed_bio->bi_end_io,
2575                                       NULL);
2576         bio->bi_opf = REQ_OP_READ | read_mode;
2577
2578         btrfs_debug(btrfs_sb(inode->i_sb),
2579                 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2580                 read_mode, failrec->this_mirror, failrec->in_validation);
2581
2582         status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
2583                                          failrec->bio_flags);
2584         if (status) {
2585                 free_io_failure(failure_tree, tree, failrec);
2586                 bio_put(bio);
2587                 ret = blk_status_to_errno(status);
2588         }
2589
2590         return ret;
2591 }
2592
2593 /* lots and lots of room for performance fixes in the end_bio funcs */
2594
2595 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2596 {
2597         int uptodate = (err == 0);
2598         int ret = 0;
2599
2600         btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2601
2602         if (!uptodate) {
2603                 ClearPageUptodate(page);
2604                 SetPageError(page);
2605                 ret = err < 0 ? err : -EIO;
2606                 mapping_set_error(page->mapping, ret);
2607         }
2608 }
2609
2610 /*
2611  * after a writepage IO is done, we need to:
2612  * clear the uptodate bits on error
2613  * clear the writeback bits in the extent tree for this IO
2614  * end_page_writeback if the page has no more pending IO
2615  *
2616  * Scheduling is not allowed, so the extent state tree is expected
2617  * to have one and only one object corresponding to this IO.
2618  */
2619 static void end_bio_extent_writepage(struct bio *bio)
2620 {
2621         int error = blk_status_to_errno(bio->bi_status);
2622         struct bio_vec *bvec;
2623         u64 start;
2624         u64 end;
2625         struct bvec_iter_all iter_all;
2626
2627         ASSERT(!bio_flagged(bio, BIO_CLONED));
2628         bio_for_each_segment_all(bvec, bio, iter_all) {
2629                 struct page *page = bvec->bv_page;
2630                 struct inode *inode = page->mapping->host;
2631                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2632
2633                 /* We always issue full-page reads, but if some block
2634                  * in a page fails to read, blk_update_request() will
2635                  * advance bv_offset and adjust bv_len to compensate.
2636                  * Print a warning for nonzero offsets, and an error
2637                  * if they don't add up to a full page.  */
2638                 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2639                         if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2640                                 btrfs_err(fs_info,
2641                                    "partial page write in btrfs with offset %u and length %u",
2642                                         bvec->bv_offset, bvec->bv_len);
2643                         else
2644                                 btrfs_info(fs_info,
2645                                    "incomplete page write in btrfs with offset %u and length %u",
2646                                         bvec->bv_offset, bvec->bv_len);
2647                 }
2648
2649                 start = page_offset(page);
2650                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2651
2652                 end_extent_writepage(page, error, start, end);
2653                 end_page_writeback(page);
2654         }
2655
2656         bio_put(bio);
2657 }
2658
2659 static void
2660 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2661                               int uptodate)
2662 {
2663         struct extent_state *cached = NULL;
2664         u64 end = start + len - 1;
2665
2666         if (uptodate && tree->track_uptodate)
2667                 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2668         unlock_extent_cached_atomic(tree, start, end, &cached);
2669 }
2670
2671 /*
2672  * after a readpage IO is done, we need to:
2673  * clear the uptodate bits on error
2674  * set the uptodate bits if things worked
2675  * set the page up to date if all extents in the tree are uptodate
2676  * clear the lock bit in the extent tree
2677  * unlock the page if there are no other extents locked for it
2678  *
2679  * Scheduling is not allowed, so the extent state tree is expected
2680  * to have one and only one object corresponding to this IO.
2681  */
2682 static void end_bio_extent_readpage(struct bio *bio)
2683 {
2684         struct bio_vec *bvec;
2685         int uptodate = !bio->bi_status;
2686         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2687         struct extent_io_tree *tree, *failure_tree;
2688         u64 offset = 0;
2689         u64 start;
2690         u64 end;
2691         u64 len;
2692         u64 extent_start = 0;
2693         u64 extent_len = 0;
2694         int mirror;
2695         int ret;
2696         struct bvec_iter_all iter_all;
2697
2698         ASSERT(!bio_flagged(bio, BIO_CLONED));
2699         bio_for_each_segment_all(bvec, bio, iter_all) {
2700                 struct page *page = bvec->bv_page;
2701                 struct inode *inode = page->mapping->host;
2702                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2703                 bool data_inode = btrfs_ino(BTRFS_I(inode))
2704                         != BTRFS_BTREE_INODE_OBJECTID;
2705
2706                 btrfs_debug(fs_info,
2707                         "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2708                         (u64)bio->bi_iter.bi_sector, bio->bi_status,
2709                         io_bio->mirror_num);
2710                 tree = &BTRFS_I(inode)->io_tree;
2711                 failure_tree = &BTRFS_I(inode)->io_failure_tree;
2712
2713                 /* We always issue full-page reads, but if some block
2714                  * in a page fails to read, blk_update_request() will
2715                  * advance bv_offset and adjust bv_len to compensate.
2716                  * Print a warning for nonzero offsets, and an error
2717                  * if they don't add up to a full page.  */
2718                 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2719                         if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2720                                 btrfs_err(fs_info,
2721                                         "partial page read in btrfs with offset %u and length %u",
2722                                         bvec->bv_offset, bvec->bv_len);
2723                         else
2724                                 btrfs_info(fs_info,
2725                                         "incomplete page read in btrfs with offset %u and length %u",
2726                                         bvec->bv_offset, bvec->bv_len);
2727                 }
2728
2729                 start = page_offset(page);
2730                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2731                 len = bvec->bv_len;
2732
2733                 mirror = io_bio->mirror_num;
2734                 if (likely(uptodate)) {
2735                         ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2736                                                               page, start, end,
2737                                                               mirror);
2738                         if (ret)
2739                                 uptodate = 0;
2740                         else
2741                                 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2742                                                  failure_tree, tree, start,
2743                                                  page,
2744                                                  btrfs_ino(BTRFS_I(inode)), 0);
2745                 }
2746
2747                 if (likely(uptodate))
2748                         goto readpage_ok;
2749
2750                 if (data_inode) {
2751
2752                         /*
2753                          * The generic bio_readpage_error handles errors the
2754                          * following way: If possible, new read requests are
2755                          * created and submitted and will end up in
2756                          * end_bio_extent_readpage as well (if we're lucky,
2757                          * not in the !uptodate case). In that case it returns
2758                          * 0 and we just go on with the next page in our bio.
2759                          * If it can't handle the error it will return -EIO and
2760                          * we remain responsible for that page.
2761                          */
2762                         ret = bio_readpage_error(bio, offset, page, start, end,
2763                                                  mirror);
2764                         if (ret == 0) {
2765                                 uptodate = !bio->bi_status;
2766                                 offset += len;
2767                                 continue;
2768                         }
2769                 } else {
2770                         struct extent_buffer *eb;
2771
2772                         eb = (struct extent_buffer *)page->private;
2773                         set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2774                         eb->read_mirror = mirror;
2775                         atomic_dec(&eb->io_pages);
2776                         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2777                                                &eb->bflags))
2778                                 btree_readahead_hook(eb, -EIO);
2779                 }
2780 readpage_ok:
2781                 if (likely(uptodate)) {
2782                         loff_t i_size = i_size_read(inode);
2783                         pgoff_t end_index = i_size >> PAGE_SHIFT;
2784                         unsigned off;
2785
2786                         /* Zero out the end if this page straddles i_size */
2787                         off = offset_in_page(i_size);
2788                         if (page->index == end_index && off)
2789                                 zero_user_segment(page, off, PAGE_SIZE);
2790                         SetPageUptodate(page);
2791                 } else {
2792                         ClearPageUptodate(page);
2793                         SetPageError(page);
2794                 }
2795                 unlock_page(page);
2796                 offset += len;
2797
2798                 if (unlikely(!uptodate)) {
2799                         if (extent_len) {
2800                                 endio_readpage_release_extent(tree,
2801                                                               extent_start,
2802                                                               extent_len, 1);
2803                                 extent_start = 0;
2804                                 extent_len = 0;
2805                         }
2806                         endio_readpage_release_extent(tree, start,
2807                                                       end - start + 1, 0);
2808                 } else if (!extent_len) {
2809                         extent_start = start;
2810                         extent_len = end + 1 - start;
2811                 } else if (extent_start + extent_len == start) {
2812                         extent_len += end + 1 - start;
2813                 } else {
2814                         endio_readpage_release_extent(tree, extent_start,
2815                                                       extent_len, uptodate);
2816                         extent_start = start;
2817                         extent_len = end + 1 - start;
2818                 }
2819         }
2820
2821         if (extent_len)
2822                 endio_readpage_release_extent(tree, extent_start, extent_len,
2823                                               uptodate);
2824         btrfs_io_bio_free_csum(io_bio);
2825         bio_put(bio);
2826 }
2827
2828 /*
2829  * Initialize the members up to but not including 'bio'. Use this after
2830  * allocating a new bio with bio_alloc_bioset, as bio_alloc_bioset does not
2831  * initialize the bytes outside of 'bio' (__GFP_ZERO is not supported).
2832  */
2833 static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2834 {
2835         memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2836 }
2837
2838 /*
2839  * The following helpers allocate a bio. As it's backed by a bioset, it'll
2840  * never fail.  We return a plain bio here, but callers can use btrfs_io_bio()
2841  * for the appropriate container_of magic.
2842  */
2843 struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
2844 {
2845         struct bio *bio;
2846
2847         bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
2848         bio_set_dev(bio, bdev);
2849         bio->bi_iter.bi_sector = first_byte >> 9;
2850         btrfs_io_bio_init(btrfs_io_bio(bio));
2851         return bio;
2852 }
2853
2854 struct bio *btrfs_bio_clone(struct bio *bio)
2855 {
2856         struct btrfs_io_bio *btrfs_bio;
2857         struct bio *new;
2858
2859         /* Bio allocation backed by a bioset does not fail */
2860         new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
2861         btrfs_bio = btrfs_io_bio(new);
2862         btrfs_io_bio_init(btrfs_bio);
2863         btrfs_bio->iter = bio->bi_iter;
2864         return new;
2865 }
2866
2867 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2868 {
2869         struct bio *bio;
2870
2871         /* Bio allocation backed by a bioset does not fail */
2872         bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
2873         btrfs_io_bio_init(btrfs_io_bio(bio));
2874         return bio;
2875 }
2876
2877 struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2878 {
2879         struct bio *bio;
2880         struct btrfs_io_bio *btrfs_bio;
2881
2882         /* this will never fail when it's backed by a bioset */
2883         bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
2884         ASSERT(bio);
2885
2886         btrfs_bio = btrfs_io_bio(bio);
2887         btrfs_io_bio_init(btrfs_bio);
2888
2889         bio_trim(bio, offset >> 9, size >> 9);
2890         btrfs_bio->iter = bio->bi_iter;
2891         return bio;
2892 }
2893
2894 /*
2895  * @opf:        bio REQ_OP_* and REQ_* flags as one value
2896  * @tree:       tree so we can call our merge_bio hook
2897  * @wbc:        optional writeback control for io accounting
2898  * @page:       page to add to the bio
2899  * @pg_offset:  offset within the page where the data starts; also used to
2900  *              check whether we are adding a contiguous page to the previous one
2901  * @size:       portion of the page that we want to write
2902  * @offset:     logical offset of the data, used to set the bio's sector
2903  * @bdev:       attach newly created bios to this bdev
2904  * @bio_ret:    must be valid pointer, newly allocated bio will be stored there
2905  * @end_io_func:     end_io callback for new bio
2906  * @mirror_num:      desired mirror to read/write
2907  * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
2908  * @bio_flags:  flags of the current bio to see if we can merge them
2909  */
2910 static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
2911                               struct writeback_control *wbc,
2912                               struct page *page, u64 offset,
2913                               size_t size, unsigned long pg_offset,
2914                               struct block_device *bdev,
2915                               struct bio **bio_ret,
2916                               bio_end_io_t end_io_func,
2917                               int mirror_num,
2918                               unsigned long prev_bio_flags,
2919                               unsigned long bio_flags,
2920                               bool force_bio_submit)
2921 {
2922         int ret = 0;
2923         struct bio *bio;
2924         size_t page_size = min_t(size_t, size, PAGE_SIZE);
2925         sector_t sector = offset >> 9;
2926
2927         ASSERT(bio_ret);
2928
2929         if (*bio_ret) {
2930                 bool contig;
2931                 bool can_merge = true;
2932
2933                 bio = *bio_ret;
2934                 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
2935                         contig = bio->bi_iter.bi_sector == sector;
2936                 else
2937                         contig = bio_end_sector(bio) == sector;
2938
2939                 ASSERT(tree->ops);
2940                 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
2941                         can_merge = false;
2942
2943                 if (prev_bio_flags != bio_flags || !contig || !can_merge ||
2944                     force_bio_submit ||
2945                     bio_add_page(bio, page, page_size, pg_offset) < page_size) {
2946                         ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
2947                         if (ret < 0) {
2948                                 *bio_ret = NULL;
2949                                 return ret;
2950                         }
2951                         bio = NULL;
2952                 } else {
2953                         if (wbc)
2954                                 wbc_account_io(wbc, page, page_size);
2955                         return 0;
2956                 }
2957         }
2958
2959         bio = btrfs_bio_alloc(bdev, offset);
2960         bio_add_page(bio, page, page_size, pg_offset);
2961         bio->bi_end_io = end_io_func;
2962         bio->bi_private = tree;
2963         bio->bi_write_hint = page->mapping->host->i_write_hint;
2964         bio->bi_opf = opf;
2965         if (wbc) {
2966                 wbc_init_bio(wbc, bio);
2967                 wbc_account_io(wbc, page, page_size);
2968         }
2969
2970         *bio_ret = bio;
2971
2972         return ret;
2973 }
2974
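     /*
      * Attach an extent buffer to a page via page->private.  The first attach
      * takes an extra page reference; if the page already has private data we
      * only warn when it points to a different extent buffer.
      */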
2975 static void attach_extent_buffer_page(struct extent_buffer *eb,
2976                                       struct page *page)
2977 {
2978         if (!PagePrivate(page)) {
2979                 SetPagePrivate(page);
2980                 get_page(page);
2981                 set_page_private(page, (unsigned long)eb);
2982         } else {
2983                 WARN_ON(page->private != (unsigned long)eb);
2984         }
2985 }
2986
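     /*
      * Mark a data page as managed by the extent IO code: take a page
      * reference and set page->private to EXTENT_PAGE_PRIVATE if it isn't
      * already set.
      */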
2987 void set_page_extent_mapped(struct page *page)
2988 {
2989         if (!PagePrivate(page)) {
2990                 SetPagePrivate(page);
2991                 get_page(page);
2992                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2993         }
2994 }
2995
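     /*
      * Look up the extent map covering 'start'.  If the map cached in
      * *em_cached is still in the tree and covers the range, reuse it;
      * otherwise drop it, call get_extent() for a fresh map and cache that
      * one for the next call.
      */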
2996 static struct extent_map *
2997 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2998                  u64 start, u64 len, get_extent_t *get_extent,
2999                  struct extent_map **em_cached)
3000 {
3001         struct extent_map *em;
3002
3003         if (em_cached && *em_cached) {
3004                 em = *em_cached;
3005                 if (extent_map_in_tree(em) && start >= em->start &&
3006                     start < extent_map_end(em)) {
3007                         refcount_inc(&em->refs);
3008                         return em;
3009                 }
3010
3011                 free_extent_map(em);
3012                 *em_cached = NULL;
3013         }
3014
3015         em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
3016         if (em_cached && !IS_ERR_OR_NULL(em)) {
3017                 BUG_ON(*em_cached);
3018                 refcount_inc(&em->refs);
3019                 *em_cached = em;
3020         }
3021         return em;
3022 }
3023 /*
3024  * Basic readpage implementation.  Locked extent state structs are inserted
3025  * into the tree and removed when the IO is done (by the end_io
3026  * handlers).
3027  * XXX JDM: This needs looking at to ensure proper page locking
3028  * Return 0 on success, otherwise return error.
3029  */
3030 static int __do_readpage(struct extent_io_tree *tree,
3031                          struct page *page,
3032                          get_extent_t *get_extent,
3033                          struct extent_map **em_cached,
3034                          struct bio **bio, int mirror_num,
3035                          unsigned long *bio_flags, unsigned int read_flags,
3036                          u64 *prev_em_start)
3037 {
3038         struct inode *inode = page->mapping->host;
3039         u64 start = page_offset(page);
3040         const u64 end = start + PAGE_SIZE - 1;
3041         u64 cur = start;
3042         u64 extent_offset;
3043         u64 last_byte = i_size_read(inode);
3044         u64 block_start;
3045         u64 cur_end;
3046         struct extent_map *em;
3047         struct block_device *bdev;
3048         int ret = 0;
3049         int nr = 0;
3050         size_t pg_offset = 0;
3051         size_t iosize;
3052         size_t disk_io_size;
3053         size_t blocksize = inode->i_sb->s_blocksize;
3054         unsigned long this_bio_flag = 0;
3055
3056         set_page_extent_mapped(page);
3057
3058         if (!PageUptodate(page)) {
3059                 if (cleancache_get_page(page) == 0) {
3060                         BUG_ON(blocksize != PAGE_SIZE);
3061                         unlock_extent(tree, start, end);
3062                         goto out;
3063                 }
3064         }
3065
3066         if (page->index == last_byte >> PAGE_SHIFT) {
3067                 char *userpage;
3068                 size_t zero_offset = offset_in_page(last_byte);
3069
3070                 if (zero_offset) {
3071                         iosize = PAGE_SIZE - zero_offset;
3072                         userpage = kmap_atomic(page);
3073                         memset(userpage + zero_offset, 0, iosize);
3074                         flush_dcache_page(page);
3075                         kunmap_atomic(userpage);
3076                 }
3077         }
3078         while (cur <= end) {
3079                 bool force_bio_submit = false;
3080                 u64 offset;
3081
3082                 if (cur >= last_byte) {
3083                         char *userpage;
3084                         struct extent_state *cached = NULL;
3085
3086                         iosize = PAGE_SIZE - pg_offset;
3087                         userpage = kmap_atomic(page);
3088                         memset(userpage + pg_offset, 0, iosize);
3089                         flush_dcache_page(page);
3090                         kunmap_atomic(userpage);
3091                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3092                                             &cached, GFP_NOFS);
3093                         unlock_extent_cached(tree, cur,
3094                                              cur + iosize - 1, &cached);
3095                         break;
3096                 }
3097                 em = __get_extent_map(inode, page, pg_offset, cur,
3098                                       end - cur + 1, get_extent, em_cached);
3099                 if (IS_ERR_OR_NULL(em)) {
3100                         SetPageError(page);
3101                         unlock_extent(tree, cur, end);
3102                         break;
3103                 }
3104                 extent_offset = cur - em->start;
3105                 BUG_ON(extent_map_end(em) <= cur);
3106                 BUG_ON(end < cur);
3107
3108                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3109                         this_bio_flag |= EXTENT_BIO_COMPRESSED;
3110                         extent_set_compress_type(&this_bio_flag,
3111                                                  em->compress_type);
3112                 }
3113
3114                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3115                 cur_end = min(extent_map_end(em) - 1, end);
3116                 iosize = ALIGN(iosize, blocksize);
3117                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3118                         disk_io_size = em->block_len;
3119                         offset = em->block_start;
3120                 } else {
3121                         offset = em->block_start + extent_offset;
3122                         disk_io_size = iosize;
3123                 }
3124                 bdev = em->bdev;
3125                 block_start = em->block_start;
3126                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3127                         block_start = EXTENT_MAP_HOLE;
3128
3129                 /*
3130                  * If we have a file range that points to a compressed extent
3131                  * and it's followed by a consecutive file range that points
3132                  * to the same compressed extent (possibly with a different
3133                  * offset and/or length, so it either points to the whole extent
3134                  * or only part of it), we must make sure we do not submit a
3135                  * single bio to populate the pages for the 2 ranges because
3136                  * this makes the compressed extent read zero out the pages
3137                  * belonging to the 2nd range. Imagine the following scenario:
3138                  *
3139                  *  File layout
3140                  *  [0 - 8K]                     [8K - 24K]
3141                  *    |                               |
3142                  *    |                               |
3143                  * points to extent X,         points to extent X,
3144                  * offset 4K, length of 8K     offset 0, length 16K
3145                  *
3146                  * [extent X, compressed length = 4K uncompressed length = 16K]
3147                  *
3148                  * If the bio to read the compressed extent covers both ranges,
3149                  * it will decompress extent X into the pages belonging to the
3150                  * first range and then it will stop, zeroing out the remaining
3151                  * pages that belong to the other range that points to extent X.
3152                  * So here we make sure we submit 2 bios, one for the first
3153                  * range and another one for the second range. Both will target
3154                  * the same physical extent from disk, but we can't currently
3155                  * make the compressed bio endio callback populate the pages
3156                  * for both ranges because each compressed bio is tightly
3157                  * coupled with a single extent map, and each range can have
3158                  * an extent map with a different offset value relative to the
3159                  * uncompressed data of our extent and different lengths. This
3160                  * is a corner case so we prioritize correctness over
3161                  * non-optimal behavior (submitting 2 bios for the same extent).
3162                  */
3163                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3164                     prev_em_start && *prev_em_start != (u64)-1 &&
3165                     *prev_em_start != em->start)
3166                         force_bio_submit = true;
3167
3168                 if (prev_em_start)
3169                         *prev_em_start = em->start;
3170
3171                 free_extent_map(em);
3172                 em = NULL;
3173
3174                 /* we've found a hole, just zero and go on */
3175                 if (block_start == EXTENT_MAP_HOLE) {
3176                         char *userpage;
3177                         struct extent_state *cached = NULL;
3178
3179                         userpage = kmap_atomic(page);
3180                         memset(userpage + pg_offset, 0, iosize);
3181                         flush_dcache_page(page);
3182                         kunmap_atomic(userpage);
3183
3184                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3185                                             &cached, GFP_NOFS);
3186                         unlock_extent_cached(tree, cur,
3187                                              cur + iosize - 1, &cached);
3188                         cur = cur + iosize;
3189                         pg_offset += iosize;
3190                         continue;
3191                 }
3192                 /* the get_extent function already copied into the page */
3193                 if (test_range_bit(tree, cur, cur_end,
3194                                    EXTENT_UPTODATE, 1, NULL)) {
3195                         check_page_uptodate(tree, page);
3196                         unlock_extent(tree, cur, cur + iosize - 1);
3197                         cur = cur + iosize;
3198                         pg_offset += iosize;
3199                         continue;
3200                 }
3201                 /* we have an inline extent but it didn't get marked up
3202                  * to date.  Error out
3203                  */
3204                 if (block_start == EXTENT_MAP_INLINE) {
3205                         SetPageError(page);
3206                         unlock_extent(tree, cur, cur + iosize - 1);
3207                         cur = cur + iosize;
3208                         pg_offset += iosize;
3209                         continue;
3210                 }
3211
3212                 ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
3213                                          page, offset, disk_io_size,
3214                                          pg_offset, bdev, bio,
3215                                          end_bio_extent_readpage, mirror_num,
3216                                          *bio_flags,
3217                                          this_bio_flag,
3218                                          force_bio_submit);
3219                 if (!ret) {
3220                         nr++;
3221                         *bio_flags = this_bio_flag;
3222                 } else {
3223                         SetPageError(page);
3224                         unlock_extent(tree, cur, cur + iosize - 1);
3225                         goto out;
3226                 }
3227                 cur = cur + iosize;
3228                 pg_offset += iosize;
3229         }
3230 out:
3231         if (!nr) {
3232                 if (!PageError(page))
3233                         SetPageUptodate(page);
3234                 unlock_page(page);
3235         }
3236         return ret;
3237 }
3238
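     /*
      * Read a batch of pages that are contiguous in the file.  Ordered extents
      * covering [start, end] are flushed and the range locked once up front,
      * then every page is read through __do_readpage() with REQ_RAHEAD,
      * sharing the bio, bio flags and cached extent map across the pages.
      */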
3239 static inline void contiguous_readpages(struct extent_io_tree *tree,
3240                                              struct page *pages[], int nr_pages,
3241                                              u64 start, u64 end,
3242                                              struct extent_map **em_cached,
3243                                              struct bio **bio,
3244                                              unsigned long *bio_flags,
3245                                              u64 *prev_em_start)
3246 {
3247         struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3248         int index;
3249
3250         btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
3251
3252         for (index = 0; index < nr_pages; index++) {
3253                 __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
3254                                 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
3255                 put_page(pages[index]);
3256         }
3257 }
3258
3259 static int __extent_read_full_page(struct extent_io_tree *tree,
3260                                    struct page *page,
3261                                    get_extent_t *get_extent,
3262                                    struct bio **bio, int mirror_num,
3263                                    unsigned long *bio_flags,
3264                                    unsigned int read_flags)
3265 {
3266         struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3267         u64 start = page_offset(page);
3268         u64 end = start + PAGE_SIZE - 1;
3269         int ret;
3270
3271         btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
3272
3273         ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3274                             bio_flags, read_flags, NULL);
3275         return ret;
3276 }
3277
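     /*
      * Read a single page and submit whatever bio was built up for it along
      * the way.
      */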
3278 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3279                             get_extent_t *get_extent, int mirror_num)
3280 {
3281         struct bio *bio = NULL;
3282         unsigned long bio_flags = 0;
3283         int ret;
3284
3285         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3286                                       &bio_flags, 0);
3287         if (bio)
3288                 ret = submit_one_bio(bio, mirror_num, bio_flags);
3289         return ret;
3290 }
3291
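     /* Charge pages written back against the writeback_control budget. */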
3292 static void update_nr_written(struct writeback_control *wbc,
3293                               unsigned long nr_written)
3294 {
3295         wbc->nr_to_write -= nr_written;
3296 }
3297
3298 /*
3299  * helper for __extent_writepage, doing all of the delayed allocation setup.
3300  *
3301  * This returns 1 if the btrfs_run_delalloc_range function did all the work required
3302  * to write the page (copy into inline extent).  In this case the IO has
3303  * been started and the page is already unlocked.
3304  *
3305  * This returns 0 if all went well (page still locked)
3306  * This returns < 0 if there were errors (page still locked)
3307  */
3308 static noinline_for_stack int writepage_delalloc(struct inode *inode,
3309                 struct page *page, struct writeback_control *wbc,
3310                 u64 delalloc_start, unsigned long *nr_written)
3311 {
3312         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3313         u64 page_end = delalloc_start + PAGE_SIZE - 1;
3314         bool found;
3315         u64 delalloc_to_write = 0;
3316         u64 delalloc_end = 0;
3317         int ret;
3318         int page_started = 0;
3319
3320
3321         while (delalloc_end < page_end) {
3322                 found = find_lock_delalloc_range(inode, tree,
3323                                                page,
3324                                                &delalloc_start,
3325                                                &delalloc_end);
3326                 if (!found) {
3327                         delalloc_start = delalloc_end + 1;
3328                         continue;
3329                 }
3330                 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3331                                 delalloc_end, &page_started, nr_written, wbc);
3332                 if (ret) {
3333                         SetPageError(page);
3334                         /*
3335                          * btrfs_run_delalloc_range should return < 0 for error
3336                          * but just in case, we use > 0 here meaning the IO is
3337                          * started, so we don't want to return > 0 unless
3338                          * things are going well.
3339                          */
3340                         ret = ret < 0 ? ret : -EIO;
3341                         goto done;
3342                 }
3343                 /*
3344                  * delalloc_end is already one less than the total length, so
3345                  * we don't subtract one from PAGE_SIZE
3346                  */
3347                 delalloc_to_write += (delalloc_end - delalloc_start +
3348                                       PAGE_SIZE) >> PAGE_SHIFT;
3349                 delalloc_start = delalloc_end + 1;
3350         }
3351         if (wbc->nr_to_write < delalloc_to_write) {
3352                 int thresh = 8192;
3353
3354                 if (delalloc_to_write < thresh * 2)
3355                         thresh = delalloc_to_write;
3356                 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3357                                          thresh);
3358         }
3359
3360         /* did btrfs_run_delalloc_range already unlock and start
3361          * the IO?
3362          */
3363         if (page_started) {
3364                 /*
3365                  * we've unlocked the page, so we can't update
3366                  * the mapping's writeback index, just update
3367                  * nr_to_write.
3368                  */
3369                 wbc->nr_to_write -= *nr_written;
3370                 return 1;
3371         }
3372
3373         ret = 0;
3374
3375 done:
3376         return ret;
3377 }
3378
3379 /*
3380  * helper for __extent_writepage.  This calls the writepage start hooks,
3381  * and does the loop to map the page into extents and bios.
3382  *
3383  * We return 1 if the IO is started and the page is unlocked,
3384  * 0 if all went well (page still locked)
3385  * < 0 if there were errors (page still locked)
3386  */
3387 static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3388                                  struct page *page,
3389                                  struct writeback_control *wbc,
3390                                  struct extent_page_data *epd,
3391                                  loff_t i_size,
3392                                  unsigned long nr_written,
3393                                  unsigned int write_flags, int *nr_ret)
3394 {
3395         struct extent_io_tree *tree = epd->tree;
3396         u64 start = page_offset(page);
3397         u64 page_end = start + PAGE_SIZE - 1;
3398         u64 end;
3399         u64 cur = start;
3400         u64 extent_offset;
3401         u64 block_start;
3402         u64 iosize;
3403         struct extent_map *em;
3404         struct block_device *bdev;
3405         size_t pg_offset = 0;
3406         size_t blocksize;
3407         int ret = 0;
3408         int nr = 0;
3409         bool compressed;
3410
3411         ret = btrfs_writepage_cow_fixup(page, start, page_end);
3412         if (ret) {
3413                 /* Fixup worker will requeue */
3414                 if (ret == -EBUSY)
3415                         wbc->pages_skipped++;
3416                 else
3417                         redirty_page_for_writepage(wbc, page);
3418
3419                 update_nr_written(wbc, nr_written);
3420                 unlock_page(page);
3421                 return 1;
3422         }
3423
3424         /*
3425          * we don't want to touch the inode after unlocking the page,
3426          * so we update the mapping writeback index now
3427          */
3428         update_nr_written(wbc, nr_written + 1);
3429
3430         end = page_end;
3431         if (i_size <= start) {
3432                 btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
3433                 goto done;
3434         }
3435
3436         blocksize = inode->i_sb->s_blocksize;
3437
3438         while (cur <= end) {
3439                 u64 em_end;
3440                 u64 offset;
3441
3442                 if (cur >= i_size) {
3443                         btrfs_writepage_endio_finish_ordered(page, cur,
3444                                                              page_end, 1);
3445                         break;
3446                 }
3447                 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
3448                                      end - cur + 1, 1);
3449                 if (IS_ERR_OR_NULL(em)) {
3450                         SetPageError(page);
3451                         ret = PTR_ERR_OR_ZERO(em);
3452                         break;
3453                 }
3454
3455                 extent_offset = cur - em->start;
3456                 em_end = extent_map_end(em);
3457                 BUG_ON(em_end <= cur);
3458                 BUG_ON(end < cur);
3459                 iosize = min(em_end - cur, end - cur + 1);
3460                 iosize = ALIGN(iosize, blocksize);
3461                 offset = em->block_start + extent_offset;
3462                 bdev = em->bdev;
3463                 block_start = em->block_start;
3464                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3465                 free_extent_map(em);
3466                 em = NULL;
3467
3468                 /*
3469                  * compressed and inline extents are written through other
3470                  * paths in the FS
3471                  */
3472                 if (compressed || block_start == EXTENT_MAP_HOLE ||
3473                     block_start == EXTENT_MAP_INLINE) {
3474                         /*
3475                          * end_io notification does not happen here for
3476                          * compressed extents
3477                          */
3478                         if (!compressed)
3479                                 btrfs_writepage_endio_finish_ordered(page, cur,
3480                                                             cur + iosize - 1,
3481                                                             1);
3482                         else if (compressed) {
3483                                 /* we don't want to end_page_writeback on
3484                                  * a compressed extent.  this happens
3485                                  * elsewhere
3486                                  */
3487                                 nr++;
3488                         }
3489
3490                         cur += iosize;
3491                         pg_offset += iosize;
3492                         continue;
3493                 }
3494
3495                 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
3496                 if (!PageWriteback(page)) {
3497                         btrfs_err(BTRFS_I(inode)->root->fs_info,
3498                                    "page %lu not writeback, cur %llu end %llu",
3499                                page->index, cur, end);
3500                 }
3501
3502                 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3503                                          page, offset, iosize, pg_offset,
3504                                          bdev, &epd->bio,
3505                                          end_bio_extent_writepage,
3506                                          0, 0, 0, false);
3507                 if (ret) {
3508                         SetPageError(page);
3509                         if (PageWriteback(page))
3510                                 end_page_writeback(page);
3511                 }
3512
3513                 cur = cur + iosize;
3514                 pg_offset += iosize;
3515                 nr++;
3516         }
3517 done:
3518         *nr_ret = nr;
3519         return ret;
3520 }
3521
3522 /*
3523  * the writepage semantics are similar to regular writepage.  extent
3524  * records are inserted to lock ranges in the tree, and as dirty areas
3525  * are found, they are marked writeback.  Then the lock bits are removed
3526  * and the end_io handler clears the writeback ranges
3527  *
3528  * Return 0 if everything goes well.
3529  * Return <0 for error.
3530  */
3531 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3532                               struct extent_page_data *epd)
3533 {
3534         struct inode *inode = page->mapping->host;
3535         u64 start = page_offset(page);
3536         u64 page_end = start + PAGE_SIZE - 1;
3537         int ret;
3538         int nr = 0;
3539         size_t pg_offset = 0;
3540         loff_t i_size = i_size_read(inode);
3541         unsigned long end_index = i_size >> PAGE_SHIFT;
3542         unsigned int write_flags = 0;
3543         unsigned long nr_written = 0;
3544
3545         write_flags = wbc_to_write_flags(wbc);
3546
3547         trace___extent_writepage(page, inode, wbc);
3548
3549         WARN_ON(!PageLocked(page));
3550
3551         ClearPageError(page);
3552
3553         pg_offset = offset_in_page(i_size);
3554         if (page->index > end_index ||
3555            (page->index == end_index && !pg_offset)) {
3556                 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3557                 unlock_page(page);
3558                 return 0;
3559         }
3560
3561         if (page->index == end_index) {
3562                 char *userpage;
3563
3564                 userpage = kmap_atomic(page);
3565                 memset(userpage + pg_offset, 0,
3566                        PAGE_SIZE - pg_offset);
3567                 kunmap_atomic(userpage);
3568                 flush_dcache_page(page);
3569         }
3570
3571         pg_offset = 0;
3572
3573         set_page_extent_mapped(page);
3574
3575         if (!epd->extent_locked) {
3576                 ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
3577                 if (ret == 1)
3578                         goto done_unlocked;
3579                 if (ret)
3580                         goto done;
3581         }
3582
3583         ret = __extent_writepage_io(inode, page, wbc, epd,
3584                                     i_size, nr_written, write_flags, &nr);
3585         if (ret == 1)
3586                 goto done_unlocked;
3587
3588 done:
3589         if (nr == 0) {
3590                 /* make sure the mapping tag for page dirty gets cleared */
3591                 set_page_writeback(page);
3592                 end_page_writeback(page);
3593         }
3594         if (PageError(page)) {
3595                 ret = ret < 0 ? ret : -EIO;
3596                 end_extent_writepage(page, ret, start, page_end);
3597         }
3598         unlock_page(page);
3599         ASSERT(ret <= 0);
3600         return ret;
3601
3602 done_unlocked:
3603         return 0;
3604 }
3605
3606 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3607 {
3608         wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3609                        TASK_UNINTERRUPTIBLE);
3610 }
3611
3612 /*
3613  * Lock eb pages and flush the bio if we can't get all the locks
3614  *
3615  * Return  0 if nothing went wrong
3616  * Return >0 is the same as 0, except the bio is not submitted
3617  * Return <0 if something went wrong, no page is locked
3618  */
3619 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
3620                           struct extent_page_data *epd)
3621 {
3622         struct btrfs_fs_info *fs_info = eb->fs_info;
3623         int i, num_pages, failed_page_nr;
3624         int flush = 0;
3625         int ret = 0;
3626
3627         if (!btrfs_try_tree_write_lock(eb)) {
3628                 ret = flush_write_bio(epd);
3629                 if (ret < 0)
3630                         return ret;
3631                 flush = 1;
3632                 btrfs_tree_lock(eb);
3633         }
3634
3635         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3636                 btrfs_tree_unlock(eb);
3637                 if (!epd->sync_io)
3638                         return 0;
3639                 if (!flush) {
3640                         ret = flush_write_bio(epd);
3641                         if (ret < 0)
3642                                 return ret;
3643                         flush = 1;
3644                 }
3645                 while (1) {
3646                         wait_on_extent_buffer_writeback(eb);
3647                         btrfs_tree_lock(eb);
3648                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3649                                 break;
3650                         btrfs_tree_unlock(eb);
3651                 }
3652         }
3653
3654         /*
3655          * We need to do this to prevent races with anyone checking whether the
3656          * eb is under IO, since we can end up having no IO bits set for a short
3657          * period of time.
3658          */
3659         spin_lock(&eb->refs_lock);
3660         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3661                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3662                 spin_unlock(&eb->refs_lock);
3663                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3664                 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3665                                          -eb->len,
3666                                          fs_info->dirty_metadata_batch);
3667                 ret = 1;
3668         } else {
3669                 spin_unlock(&eb->refs_lock);
3670         }
3671
3672         btrfs_tree_unlock(eb);
3673
3674         if (!ret)
3675                 return ret;
3676
3677         num_pages = num_extent_pages(eb);
3678         for (i = 0; i < num_pages; i++) {
3679                 struct page *p = eb->pages[i];
3680
3681                 if (!trylock_page(p)) {
3682                         if (!flush) {
3683                                 ret = flush_write_bio(epd);
3684                                 if (ret < 0) {
3685                                         failed_page_nr = i;
3686                                         goto err_unlock;
3687                                 }
3688                                 flush = 1;
3689                         }
3690                         lock_page(p);
3691                 }
3692         }
3693
3694         return ret;
3695 err_unlock:
3696         /* Unlock already locked pages */
3697         for (i = 0; i < failed_page_nr; i++)
3698                 unlock_page(eb->pages[i]);
3699         return ret;
3700 }
3701
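     /*
      * Clear the WRITEBACK bit on the extent buffer and wake anyone sleeping
      * in wait_on_extent_buffer_writeback().
      */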
3702 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3703 {
3704         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3705         smp_mb__after_atomic();
3706         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3707 }
3708
3709 static void set_btree_ioerr(struct page *page)
3710 {
3711         struct extent_buffer *eb = (struct extent_buffer *)page->private;
3712
3713         SetPageError(page);
3714         if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3715                 return;
3716
3717         /*
3718          * If writeback for a btree extent that doesn't belong to a log tree
3719          * failed, increment the counter transaction->eb_write_errors.
3720          * We do this because while the transaction is running and before it's
3721          * committing (when we call filemap_fdata[write|wait]_range against
3722          * the btree inode), we might have
3723          * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3724          * returns an error or an error happens during writeback, when we're
3725          * committing the transaction we wouldn't know about it, since the pages
3726          * can be no longer dirty nor marked anymore for writeback (if a
3727          * subsequent modification to the extent buffer didn't happen before the
3728          * transaction commit), which makes filemap_fdata[write|wait]_range not
3729          * able to find the pages tagged with SetPageError at transaction
3730          * commit time. So if this happens we must abort the transaction,
3731          * otherwise we commit a super block with btree roots that point to
3732          * btree nodes/leafs whose content on disk is invalid - either garbage
3733          * or the content of some node/leaf from a past generation that got
3734          * cowed or deleted and is no longer valid.
3735          *
3736          * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3737          * not be enough - we need to distinguish between log tree extents vs
3738          * non-log tree extents, and the next filemap_fdatawait_range() call
3739          * will catch and clear such errors in the mapping - and that call might
3740          * be from a log sync and not from a transaction commit. Also, checking
3741          * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3742          * not done and would not be reliable - the eb might have been released
3743          * from memory and reading it back again means that flag would not be
3744          * set (since it's a runtime flag, not persisted on disk).
3745          *
3746          * Using the flags below in the btree inode also achieves the goal of
3747          * AS_EIO/AS_ENOSPC for the case where writepages() returns success,
3748          * having started writeback for all dirty pages, but before
3749          * filemap_fdatawait_range() is called that writeback has already
3750          * finished with errors - because we were not using AS_EIO/AS_ENOSPC,
3751          * filemap_fdatawait_range() would return success, as it could not know
3752          * that writeback errors happened (the pages were no longer tagged for
3753          * writeback).
3754          */
3755         switch (eb->log_index) {
3756         case -1:
3757                 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3758                 break;
3759         case 0:
3760                 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3761                 break;
3762         case 1:
3763                 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3764                 break;
3765         default:
3766                 BUG(); /* unexpected, logic error */
3767         }
3768 }
3769
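     /*
      * Endio handler for metadata writeback.  For each page in the bio, drop
      * eb->io_pages, record errors via set_btree_ioerr() and end the page
      * writeback; once the last page of an extent buffer completes, finish
      * the buffer's writeback as well.
      */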
3770 static void end_bio_extent_buffer_writepage(struct bio *bio)
3771 {
3772         struct bio_vec *bvec;
3773         struct extent_buffer *eb;
3774         int done;
3775         struct bvec_iter_all iter_all;
3776
3777         ASSERT(!bio_flagged(bio, BIO_CLONED));
3778         bio_for_each_segment_all(bvec, bio, iter_all) {
3779                 struct page *page = bvec->bv_page;
3780
3781                 eb = (struct extent_buffer *)page->private;
3782                 BUG_ON(!eb);
3783                 done = atomic_dec_and_test(&eb->io_pages);
3784
3785                 if (bio->bi_status ||
3786                     test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3787                         ClearPageUptodate(page);
3788                         set_btree_ioerr(page);
3789                 }
3790
3791                 end_page_writeback(page);
3792
3793                 if (!done)
3794                         continue;
3795
3796                 end_extent_buffer_writeback(eb);
3797         }
3798
3799         bio_put(bio);
3800 }
3801
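     /*
      * Write a single extent buffer: zero the unused area beyond nritems so
      * no stale content hits the disk, then submit one write bio per page.
      * If a submission fails, the buffer is flagged with a btree IO error and
      * the remaining pages are cleaned up.
      */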
3802 static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3803                         struct writeback_control *wbc,
3804                         struct extent_page_data *epd)
3805 {
3806         struct btrfs_fs_info *fs_info = eb->fs_info;
3807         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3808         struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3809         u64 offset = eb->start;
3810         u32 nritems;
3811         int i, num_pages;
3812         unsigned long start, end;
3813         unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3814         int ret = 0;
3815
3816         clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3817         num_pages = num_extent_pages(eb);
3818         atomic_set(&eb->io_pages, num_pages);
3819
3820         /* set btree blocks beyond nritems to 0 to avoid stale content. */
3821         nritems = btrfs_header_nritems(eb);
3822         if (btrfs_header_level(eb) > 0) {
3823                 end = btrfs_node_key_ptr_offset(nritems);
3824
3825                 memzero_extent_buffer(eb, end, eb->len - end);
3826         } else {
3827                 /*
3828                  * leaf:
3829                  * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3830                  */
3831                 start = btrfs_item_nr_offset(nritems);
3832                 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3833                 memzero_extent_buffer(eb, start, end - start);
3834         }
3835
3836         for (i = 0; i < num_pages; i++) {
3837                 struct page *p = eb->pages[i];
3838
3839                 clear_page_dirty_for_io(p);
3840                 set_page_writeback(p);
3841                 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3842                                          p, offset, PAGE_SIZE, 0, bdev,
3843                                          &epd->bio,
3844                                          end_bio_extent_buffer_writepage,
3845                                          0, 0, 0, false);
3846                 if (ret) {
3847                         set_btree_ioerr(p);
3848                         if (PageWriteback(p))
3849                                 end_page_writeback(p);
3850                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3851                                 end_extent_buffer_writeback(eb);
3852                         ret = -EIO;
3853                         break;
3854                 }
3855                 offset += PAGE_SIZE;
3856                 update_nr_written(wbc, 1);
3857                 unlock_page(p);
3858         }
3859
3860         if (unlikely(ret)) {
3861                 for (; i < num_pages; i++) {
3862                         struct page *p = eb->pages[i];
3863                         clear_page_dirty_for_io(p);
3864                         unlock_page(p);
3865                 }
3866         }
3867
3868         return ret;
3869 }
3870
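     /*
      * Walk the dirty pages of the btree inode, much like
      * extent_write_cache_pages(), but operate on whole extent buffers: look
      * up the eb behind each page, take a reference, lock it for IO and write
      * it out with write_one_eb().
      */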
3871 int btree_write_cache_pages(struct address_space *mapping,
3872                                    struct writeback_control *wbc)
3873 {
3874         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3875         struct extent_buffer *eb, *prev_eb = NULL;
3876         struct extent_page_data epd = {
3877                 .bio = NULL,
3878                 .tree = tree,
3879                 .extent_locked = 0,
3880                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3881         };
3882         int ret = 0;
3883         int done = 0;
3884         int nr_to_write_done = 0;
3885         struct pagevec pvec;
3886         int nr_pages;
3887         pgoff_t index;
3888         pgoff_t end;            /* Inclusive */
3889         int scanned = 0;
3890         xa_mark_t tag;
3891
3892         pagevec_init(&pvec);
3893         if (wbc->range_cyclic) {
3894                 index = mapping->writeback_index; /* Start from prev offset */
3895                 end = -1;
3896         } else {
3897                 index = wbc->range_start >> PAGE_SHIFT;
3898                 end = wbc->range_end >> PAGE_SHIFT;
3899                 scanned = 1;
3900         }
3901         if (wbc->sync_mode == WB_SYNC_ALL)
3902                 tag = PAGECACHE_TAG_TOWRITE;
3903         else
3904                 tag = PAGECACHE_TAG_DIRTY;
3905 retry:
3906         if (wbc->sync_mode == WB_SYNC_ALL)
3907                 tag_pages_for_writeback(mapping, index, end);
3908         while (!done && !nr_to_write_done && (index <= end) &&
3909                (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3910                         tag))) {
3911                 unsigned i;
3912
3913                 scanned = 1;
3914                 for (i = 0; i < nr_pages; i++) {
3915                         struct page *page = pvec.pages[i];
3916
3917                         if (!PagePrivate(page))
3918                                 continue;
3919
3920                         spin_lock(&mapping->private_lock);
3921                         if (!PagePrivate(page)) {
3922                                 spin_unlock(&mapping->private_lock);
3923                                 continue;
3924                         }
3925
3926                         eb = (struct extent_buffer *)page->private;
3927
3928                         /*
3929                          * Shouldn't happen and normally this would be a BUG_ON
3930                          * but no sense in crashing the user's box for something
3931                          * we can survive anyway.
3932                          */
3933                         if (WARN_ON(!eb)) {
3934                                 spin_unlock(&mapping->private_lock);
3935                                 continue;
3936                         }
3937
3938                         if (eb == prev_eb) {
3939                                 spin_unlock(&mapping->private_lock);
3940                                 continue;
3941                         }
3942
3943                         ret = atomic_inc_not_zero(&eb->refs);
3944                         spin_unlock(&mapping->private_lock);
3945                         if (!ret)
3946                                 continue;
3947
3948                         prev_eb = eb;
3949                         ret = lock_extent_buffer_for_io(eb, &epd);
3950                         if (!ret) {
3951                                 free_extent_buffer(eb);
3952                                 continue;
3953                         }
3954
3955                         ret = write_one_eb(eb, wbc, &epd);
3956                         if (ret) {
3957                                 done = 1;
3958                                 free_extent_buffer(eb);
3959                                 break;
3960                         }
3961                         free_extent_buffer(eb);
3962
3963                         /*
3964                          * the filesystem may choose to bump up nr_to_write.
3965                          * We have to make sure to honor the new nr_to_write
3966                          * at any time
3967                          */
3968                         nr_to_write_done = wbc->nr_to_write <= 0;
3969                 }
3970                 pagevec_release(&pvec);
3971                 cond_resched();
3972         }
3973         if (!scanned && !done) {
3974                 /*
3975                  * We hit the last page and there is more work to be done: wrap
3976                  * back to the start of the file
3977                  */
3978                 scanned = 1;
3979                 index = 0;
3980                 goto retry;
3981         }
3982         ASSERT(ret <= 0);
3983         if (ret < 0) {
3984                 end_write_bio(&epd, ret);
3985                 return ret;
3986         }
3987         ret = flush_write_bio(&epd);
3988         return ret;
3989 }
3990
3991 /**
3992  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3993  * @mapping: address space structure to write
3994  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3995  * @epd: extent_page_data context passed down to __extent_writepage
3996  *
3997  * If a page is already under I/O, write_cache_pages() skips it, even
3998  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3999  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
4000  * and msync() need to guarantee that all the data which was dirty at the time
4001  * the call was made get new I/O started against them.  If wbc->sync_mode is
4002  * WB_SYNC_ALL then we were called for data integrity and we must wait for
4003  * existing IO to complete.
4004  */
4005 static int extent_write_cache_pages(struct address_space *mapping,
4006                              struct writeback_control *wbc,
4007                              struct extent_page_data *epd)
4008 {
4009         struct inode *inode = mapping->host;
4010         int ret = 0;
4011         int done = 0;
4012         int nr_to_write_done = 0;
4013         struct pagevec pvec;
4014         int nr_pages;
4015         pgoff_t index;
4016         pgoff_t end;            /* Inclusive */
4017         pgoff_t done_index;
4018         int range_whole = 0;
4019         int scanned = 0;
4020         xa_mark_t tag;
4021
4022         /*
4023          * We have to hold onto the inode so that ordered extents can do their
4024          * work when the IO finishes.  The alternative to this is failing to add
4025          * an ordered extent if the igrab() fails there and that is a huge pain
4026          * to deal with, so instead just hold onto the inode throughout the
4027          * writepages operation.  If it fails here we are freeing up the inode
4028          * anyway and we'd rather not waste our time writing out stuff that is
4029          * going to be truncated anyway.
4030          */
4031         if (!igrab(inode))
4032                 return 0;
4033
4034         pagevec_init(&pvec);
4035         if (wbc->range_cyclic) {
4036                 index = mapping->writeback_index; /* Start from prev offset */
4037                 end = -1;
4038         } else {
4039                 index = wbc->range_start >> PAGE_SHIFT;
4040                 end = wbc->range_end >> PAGE_SHIFT;
4041                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4042                         range_whole = 1;
4043                 scanned = 1;
4044         }
4045
4046         /*
4047          * We do the tagged writepage as long as the snapshot flush bit is set
4048          * and we are the first one to do the filemap_flush() on this inode.
4049          *
4050          * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4051          * not race in and drop the bit.
4052          */
4053         if (range_whole && wbc->nr_to_write == LONG_MAX &&
4054             test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4055                                &BTRFS_I(inode)->runtime_flags))
4056                 wbc->tagged_writepages = 1;
4057
4058         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4059                 tag = PAGECACHE_TAG_TOWRITE;
4060         else
4061                 tag = PAGECACHE_TAG_DIRTY;
4062 retry:
4063         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4064                 tag_pages_for_writeback(mapping, index, end);
4065         done_index = index;
4066         while (!done && !nr_to_write_done && (index <= end) &&
4067                         (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4068                                                 &index, end, tag))) {
4069                 unsigned i;
4070
4071                 scanned = 1;
4072                 for (i = 0; i < nr_pages; i++) {
4073                         struct page *page = pvec.pages[i];
4074
4075                         done_index = page->index;
4076                         /*
4077                          * At this point we hold neither the i_pages lock nor
4078                          * the page lock: the page may be truncated or
4079                          * invalidated (changing page->mapping to NULL),
4080                          * or even swizzled back from swapper_space to
4081                          * tmpfs file mapping
4082                          */
4083                         if (!trylock_page(page)) {
4084                                 ret = flush_write_bio(epd);
4085                                 BUG_ON(ret < 0);
4086                                 lock_page(page);
4087                         }
4088
4089                         if (unlikely(page->mapping != mapping)) {
4090                                 unlock_page(page);
4091                                 continue;
4092                         }
4093
4094                         if (wbc->sync_mode != WB_SYNC_NONE) {
4095                                 if (PageWriteback(page)) {
4096                                         ret = flush_write_bio(epd);
4097                                         BUG_ON(ret < 0);
4098                                 }
4099                                 wait_on_page_writeback(page);
4100                         }
4101
4102                         if (PageWriteback(page) ||
4103                             !clear_page_dirty_for_io(page)) {
4104                                 unlock_page(page);
4105                                 continue;
4106                         }
4107
4108                         ret = __extent_writepage(page, wbc, epd);
4109                         if (ret < 0) {
4110                                 /*
4111                                  * done_index is set past this page,
4112                                  * so media errors will not choke
4113                                  * background writeout for the entire
4114                                  * file. This has consequences for
4115                                  * range_cyclic semantics (ie. it may
4116                                  * not be suitable for data integrity
4117                                  * writeout).
4118                                  */
4119                                 done_index = page->index + 1;
4120                                 done = 1;
4121                                 break;
4122                         }
4123
4124                         /*
4125                          * The filesystem may choose to bump up nr_to_write.
4126                          * We have to make sure to honor the new nr_to_write
4127                          * at any time.
4128                          */
4129                         nr_to_write_done = wbc->nr_to_write <= 0;
4130                 }
4131                 pagevec_release(&pvec);
4132                 cond_resched();
4133         }
4134         if (!scanned && !done) {
4135                 /*
4136                  * We hit the last page and there is more work to be done: wrap
4137                  * back to the start of the file
4138                  */
4139                 scanned = 1;
4140                 index = 0;
4141                 goto retry;
4142         }
4143
4144         if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4145                 mapping->writeback_index = done_index;
4146
4147         btrfs_add_delayed_iput(inode);
4148         return ret;
4149 }
4150
4151 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4152 {
4153         int ret;
4154         struct extent_page_data epd = {
4155                 .bio = NULL,
4156                 .tree = &BTRFS_I(page->mapping->host)->io_tree,
4157                 .extent_locked = 0,
4158                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4159         };
4160
4161         ret = __extent_writepage(page, wbc, &epd);
4162         ASSERT(ret <= 0);
4163         if (ret < 0) {
4164                 end_write_bio(&epd, ret);
4165                 return ret;
4166         }
4167
4168         ret = flush_write_bio(&epd);
4169         ASSERT(ret <= 0);
4170         return ret;
4171 }
4172
4173 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4174                               int mode)
4175 {
4176         int ret = 0;
4177         struct address_space *mapping = inode->i_mapping;
4178         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
4179         struct page *page;
4180         unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4181                 PAGE_SHIFT;
4182
4183         struct extent_page_data epd = {
4184                 .bio = NULL,
4185                 .tree = tree,
4186                 .extent_locked = 1,
4187                 .sync_io = mode == WB_SYNC_ALL,
4188         };
4189         struct writeback_control wbc_writepages = {
4190                 .sync_mode      = mode,
4191                 .nr_to_write    = nr_pages * 2,
4192                 .range_start    = start,
4193                 .range_end      = end + 1,
4194         };
4195
4196         while (start <= end) {
4197                 page = find_get_page(mapping, start >> PAGE_SHIFT);
4198                 if (clear_page_dirty_for_io(page))
4199                         ret = __extent_writepage(page, &wbc_writepages, &epd);
4200                 else {
4201                         btrfs_writepage_endio_finish_ordered(page, start,
4202                                                     start + PAGE_SIZE - 1, 1);
4203                         unlock_page(page);
4204                 }
4205                 put_page(page);
4206                 start += PAGE_SIZE;
4207         }
4208
4209         ASSERT(ret <= 0);
4210         if (ret < 0) {
4211                 end_write_bio(&epd, ret);
4212                 return ret;
4213         }
4214         ret = flush_write_bio(&epd);
4215         return ret;
4216 }
4217
4218 int extent_writepages(struct address_space *mapping,
4219                       struct writeback_control *wbc)
4220 {
4221         int ret = 0;
4222         struct extent_page_data epd = {
4223                 .bio = NULL,
4224                 .tree = &BTRFS_I(mapping->host)->io_tree,
4225                 .extent_locked = 0,
4226                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4227         };
4228
4229         ret = extent_write_cache_pages(mapping, wbc, &epd);
4230         ASSERT(ret <= 0);
4231         if (ret < 0) {
4232                 end_write_bio(&epd, ret);
4233                 return ret;
4234         }
4235         ret = flush_write_bio(&epd);
4236         return ret;
4237 }
4238
4239 int extent_readpages(struct address_space *mapping, struct list_head *pages,
4240                      unsigned nr_pages)
4241 {
4242         struct bio *bio = NULL;
4243         unsigned long bio_flags = 0;
4244         struct page *pagepool[16];
4245         struct extent_map *em_cached = NULL;
4246         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
4247         int nr = 0;
4248         u64 prev_em_start = (u64)-1;
4249
4250         while (!list_empty(pages)) {
4251                 u64 contig_end = 0;
4252
4253                 for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
4254                         struct page *page = lru_to_page(pages);
4255
4256                         prefetchw(&page->flags);
4257                         list_del(&page->lru);
4258                         if (add_to_page_cache_lru(page, mapping, page->index,
4259                                                 readahead_gfp_mask(mapping))) {
4260                                 put_page(page);
4261                                 break;
4262                         }
4263
4264                         pagepool[nr++] = page;
4265                         contig_end = page_offset(page) + PAGE_SIZE - 1;
4266                 }
4267
4268                 if (nr) {
4269                         u64 contig_start = page_offset(pagepool[0]);
4270
4271                         ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4272
4273                         contiguous_readpages(tree, pagepool, nr, contig_start,
4274                                      contig_end, &em_cached, &bio, &bio_flags,
4275                                      &prev_em_start);
4276                 }
4277         }
4278
4279         if (em_cached)
4280                 free_extent_map(em_cached);
4281
4282         if (bio)
4283                 return submit_one_bio(bio, 0, bio_flags);
4284         return 0;
4285 }
4286
4287 /*
4288  * basic invalidatepage code; this waits on any locked or writeback
4289  * ranges corresponding to the page, and then deletes any extent state
4290  * records from the tree
4291  */
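/*
 * Worked example (for illustration, assuming 4K pages and a 4K blocksize):
 * an invalidate at offset 0 rounds to 0, so the whole range
 * [page_offset, page_offset + 4095] is locked and its state bits are cleared.
 * With offset 1, ALIGN() below rounds up to 4096, start moves past end and
 * the function returns early without touching any extent state.
 */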
4292 int extent_invalidatepage(struct extent_io_tree *tree,
4293                           struct page *page, unsigned long offset)
4294 {
4295         struct extent_state *cached_state = NULL;
4296         u64 start = page_offset(page);
4297         u64 end = start + PAGE_SIZE - 1;
4298         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4299
4300         start += ALIGN(offset, blocksize);
4301         if (start > end)
4302                 return 0;
4303
4304         lock_extent_bits(tree, start, end, &cached_state);
4305         wait_on_page_writeback(page);
4306         clear_extent_bit(tree, start, end,
4307                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4308                          EXTENT_DO_ACCOUNTING,
4309                          1, 1, &cached_state);
4310         return 0;
4311 }
4312
4313 /*
4314  * a helper for releasepage; this tests for areas of the page that
4315  * are locked or under IO and drops the related state bits if it is safe
4316  * to drop the page.
4317  */
4318 static int try_release_extent_state(struct extent_io_tree *tree,
4319                                     struct page *page, gfp_t mask)
4320 {
4321         u64 start = page_offset(page);
4322         u64 end = start + PAGE_SIZE - 1;
4323         int ret = 1;
4324
4325         if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4326                 ret = 0;
4327         } else {
4328                 /*
4329                  * at this point we can safely clear everything except the
4330                  * locked bit and the nodatasum bit
4331                  */
4332                 ret = __clear_extent_bit(tree, start, end,
4333                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4334                                  0, 0, NULL, mask, NULL);
4335
4336                 /* If clear_extent_bit failed for -ENOMEM reasons,
4337                  * we can't allow the release to continue.
4338                  */
4339                 if (ret < 0)
4340                         ret = 0;
4341                 else
4342                         ret = 1;
4343         }
4344         return ret;
4345 }
4346
4347 /*
4348  * a helper for releasepage.  As long as there are no locked extents
4349  * in the range corresponding to the page, both state records and extent
4350  * map records are removed
4351  */
4352 int try_release_extent_mapping(struct page *page, gfp_t mask)
4353 {
4354         struct extent_map *em;
4355         u64 start = page_offset(page);
4356         u64 end = start + PAGE_SIZE - 1;
4357         struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4358         struct extent_io_tree *tree = &btrfs_inode->io_tree;
4359         struct extent_map_tree *map = &btrfs_inode->extent_tree;
4360
4361         if (gfpflags_allow_blocking(mask) &&
4362             page->mapping->host->i_size > SZ_16M) {
4363                 u64 len;
4364                 while (start <= end) {
4365                         len = end - start + 1;
4366                         write_lock(&map->lock);
4367                         em = lookup_extent_mapping(map, start, len);
4368                         if (!em) {
4369                                 write_unlock(&map->lock);
4370                                 break;
4371                         }
4372                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4373                             em->start != start) {
4374                                 write_unlock(&map->lock);
4375                                 free_extent_map(em);
4376                                 break;
4377                         }
4378                         if (!test_range_bit(tree, em->start,
4379                                             extent_map_end(em) - 1,
4380                                             EXTENT_LOCKED, 0, NULL)) {
4381                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4382                                         &btrfs_inode->runtime_flags);
4383                                 remove_extent_mapping(map, em);
4384                                 /* once for the rb tree */
4385                                 free_extent_map(em);
4386                         }
4387                         start = extent_map_end(em);
4388                         write_unlock(&map->lock);
4389
4390                         /* once for us */
4391                         free_extent_map(em);
4392                 }
4393         }
4394         return try_release_extent_state(tree, page, mask);
4395 }
4396
4397 /*
4398  * helper function for fiemap, which doesn't want to see any holes.
4399  * This maps until we find something past 'last'
4400  */
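/*
 * For illustration, assuming a file with a data extent at [0, 4K), a hole at
 * [4K, 8K) and another extent at [8K, 12K): a call with offset == 4K and
 * last == 12K skips the hole and returns the extent map starting at 8K,
 * while the same call with last == 8K runs out of range inside the hole and
 * returns NULL.
 */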
4401 static struct extent_map *get_extent_skip_holes(struct inode *inode,
4402                                                 u64 offset, u64 last)
4403 {
4404         u64 sectorsize = btrfs_inode_sectorsize(inode);
4405         struct extent_map *em;
4406         u64 len;
4407
4408         if (offset >= last)
4409                 return NULL;
4410
4411         while (1) {
4412                 len = last - offset;
4413                 if (len == 0)
4414                         break;
4415                 len = ALIGN(len, sectorsize);
4416                 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
4417                 if (IS_ERR_OR_NULL(em))
4418                         return em;
4419
4420                 /* if this isn't a hole return it */
4421                 if (em->block_start != EXTENT_MAP_HOLE)
4422                         return em;
4423
4424                 /* this is a hole, advance to the next extent */
4425                 offset = extent_map_end(em);
4426                 free_extent_map(em);
4427                 if (offset >= last)
4428                         break;
4429         }
4430         return NULL;
4431 }
4432
4433 /*
4434  * To cache the previous fiemap extent
4435  *
4436  * Will be used for merging fiemap extents
4437  */
4438 struct fiemap_cache {
4439         u64 offset;
4440         u64 phys;
4441         u64 len;
4442         u32 flags;
4443         bool cached;
4444 };
4445
4446 /*
4447  * Helper to submit fiemap extent.
4448  *
4449  * Will try to merge the current fiemap extent specified by @offset, @phys,
4450  * @len and @flags with the cached one.
4451  * Only when the merge fails is the cached extent submitted as a
4452  * fiemap extent.
4453  *
4454  * Return value is the same as fiemap_fill_next_extent().
4455  */
4456 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4457                                 struct fiemap_cache *cache,
4458                                 u64 offset, u64 phys, u64 len, u32 flags)
4459 {
4460         int ret = 0;
4461
4462         if (!cache->cached)
4463                 goto assign;
4464
4465         /*
4466          * Sanity check, extent_fiemap() should have ensured that the new
4467          * fiemap extent won't overlap with the cached one.
4468          * Not recoverable.
4469          *
4470          * NOTE: Physical addresses can overlap due to compression
4471          */
4472         if (cache->offset + cache->len > offset) {
4473                 WARN_ON(1);
4474                 return -EINVAL;
4475         }
4476
4477         /*
4478          * Only merge fiemap extents if
4479          * 1) Their logical addresses are contiguous
4480          *
4481          * 2) Their physical addresses are contiguous
4482          *    So truly compressed (physical size smaller than logical size)
4483          *    extents won't get merged with each other
4484          *
4485          * 3) They share the same flags except FIEMAP_EXTENT_LAST
4486          *    So a regular extent won't get merged with a prealloc extent
4487          */
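        /*
         * Worked example (for illustration): with a cached extent of
         * offset == 0, phys == 1M, len == 4K and flags == 0, a new extent
         * with offset == 4K, phys == 1M + 4K, len == 4K and flags == 0
         * passes all three checks, so the cache simply grows to len == 8K.
         * If the new extent instead started at phys == 2M, check 2) fails,
         * the cached extent is flushed via fiemap_fill_next_extent() and
         * the new extent takes its place in the cache.
         */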
4488         if (cache->offset + cache->len  == offset &&
4489             cache->phys + cache->len == phys  &&
4490             (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4491                         (flags & ~FIEMAP_EXTENT_LAST)) {
4492                 cache->len += len;
4493                 cache->flags |= flags;
4494                 goto try_submit_last;
4495         }
4496
4497         /* Not mergeable, need to submit cached one */
4498         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4499                                       cache->len, cache->flags);
4500         cache->cached = false;
4501         if (ret)
4502                 return ret;
4503 assign:
4504         cache->cached = true;
4505         cache->offset = offset;
4506         cache->phys = phys;
4507         cache->len = len;
4508         cache->flags = flags;
4509 try_submit_last:
4510         if (cache->flags & FIEMAP_EXTENT_LAST) {
4511                 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4512                                 cache->phys, cache->len, cache->flags);
4513                 cache->cached = false;
4514         }
4515         return ret;
4516 }
4517
4518 /*
4519  * Emit last fiemap cache
4520  *
4521  * The last fiemap extent may still be cached in the following case:
4522  * 0                  4k                    8k
4523  * |<- Fiemap range ->|
4524  * |<------------  First extent ----------->|
4525  *
4526  * In this case, the first extent range will be cached but not emitted.
4527  * So we must emit it before ending extent_fiemap().
4528  */
4529 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
4530                                   struct fiemap_cache *cache)
4531 {
4532         int ret;
4533
4534         if (!cache->cached)
4535                 return 0;
4536
4537         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4538                                       cache->len, cache->flags);
4539         cache->cached = false;
4540         if (ret > 0)
4541                 ret = 0;
4542         return ret;
4543 }
4544
4545 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4546                 __u64 start, __u64 len)
4547 {
4548         int ret = 0;
4549         u64 off = start;
4550         u64 max = start + len;
4551         u32 flags = 0;
4552         u32 found_type;
4553         u64 last;
4554         u64 last_for_get_extent = 0;
4555         u64 disko = 0;
4556         u64 isize = i_size_read(inode);
4557         struct btrfs_key found_key;
4558         struct extent_map *em = NULL;
4559         struct extent_state *cached_state = NULL;
4560         struct btrfs_path *path;
4561         struct btrfs_root *root = BTRFS_I(inode)->root;
4562         struct fiemap_cache cache = { 0 };
4563         struct ulist *roots;
4564         struct ulist *tmp_ulist;
4565         int end = 0;
4566         u64 em_start = 0;
4567         u64 em_len = 0;
4568         u64 em_end = 0;
4569
4570         if (len == 0)
4571                 return -EINVAL;
4572
4573         path = btrfs_alloc_path();
4574         if (!path)
4575                 return -ENOMEM;
4576         path->leave_spinning = 1;
4577
4578         roots = ulist_alloc(GFP_KERNEL);
4579         tmp_ulist = ulist_alloc(GFP_KERNEL);
4580         if (!roots || !tmp_ulist) {
4581                 ret = -ENOMEM;
4582                 goto out_free_ulist;
4583         }
4584
4585         start = round_down(start, btrfs_inode_sectorsize(inode));
4586         len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4587
4588         /*
4589          * lookup the last file extent.  We're not using i_size here
4590          * because there might be preallocation past i_size
4591          */
4592         ret = btrfs_lookup_file_extent(NULL, root, path,
4593                         btrfs_ino(BTRFS_I(inode)), -1, 0);
4594         if (ret < 0) {
4595                 btrfs_free_path(path);
4596                 goto out_free_ulist;
4597         } else {
4598                 WARN_ON(!ret);
4599                 if (ret == 1)
4600                         ret = 0;
4601         }
4602
4603         path->slots[0]--;
4604         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4605         found_type = found_key.type;
4606
4607         /* No extents, but there might be delalloc bits */
4608         if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
4609             found_type != BTRFS_EXTENT_DATA_KEY) {
4610                 /* have to trust i_size as the end */
4611                 last = (u64)-1;
4612                 last_for_get_extent = isize;
4613         } else {
4614                 /*
4615                  * remember the start of the last extent.  There are a
4616                  * bunch of different factors that go into the length of the
4617                  * extent, so it's much less complex to remember where it started
4618                  */
4619                 last = found_key.offset;
4620                 last_for_get_extent = last + 1;
4621         }
4622         btrfs_release_path(path);
4623
4624         /*
4625          * we might have some extents allocated but more delalloc past those
4626          * extents.  So we trust isize unless the start of the last extent is
4627          * beyond isize
4628          */
4629         if (last < isize) {
4630                 last = (u64)-1;
4631                 last_for_get_extent = isize;
4632         }
4633
4634         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4635                          &cached_state);
4636
4637         em = get_extent_skip_holes(inode, start, last_for_get_extent);
4638         if (!em)
4639                 goto out;
4640         if (IS_ERR(em)) {
4641                 ret = PTR_ERR(em);
4642                 goto out;
4643         }
4644
4645         while (!end) {
4646                 u64 offset_in_extent = 0;
4647
4648                 /* break if the extent we found is outside the range */
4649                 if (em->start >= max || extent_map_end(em) < off)
4650                         break;
4651
4652                 /*
4653                  * get_extent may return an extent that starts before our
4654                  * requested range.  We have to make sure the ranges
4655                  * we return to fiemap always move forward and don't
4656                  * overlap, so adjust the offsets here
4657                  */
4658                 em_start = max(em->start, off);
4659
4660                 /*
4661                  * record the offset from the start of the extent
4662                  * for adjusting the disk offset below.  Only do this if the
4663                  * extent isn't compressed since our in-ram offset may be past
4664                  * what we have actually allocated on disk.
4665                  */
4666                 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4667                         offset_in_extent = em_start - em->start;
4668                 em_end = extent_map_end(em);
4669                 em_len = em_end - em_start;
4670                 flags = 0;
4671                 if (em->block_start < EXTENT_MAP_LAST_BYTE)
4672                         disko = em->block_start + offset_in_extent;
4673                 else
4674                         disko = 0;
4675
4676                 /*
4677                  * bump off for our next call to get_extent
4678                  */
4679                 off = extent_map_end(em);
4680                 if (off >= max)
4681                         end = 1;
4682
4683                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4684                         end = 1;
4685                         flags |= FIEMAP_EXTENT_LAST;
4686                 } else if (em->block_start == EXTENT_MAP_INLINE) {
4687                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
4688                                   FIEMAP_EXTENT_NOT_ALIGNED);
4689                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4690                         flags |= (FIEMAP_EXTENT_DELALLOC |
4691                                   FIEMAP_EXTENT_UNKNOWN);
4692                 } else if (fieinfo->fi_extents_max) {
4693                         u64 bytenr = em->block_start -
4694                                 (em->start - em->orig_start);
4695
4696                         /*
4697                          * As btrfs supports shared space, this information
4698                          * can be exported to userspace tools via
4699                          * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4700                          * then we're just getting a count and we can skip the
4701                          * lookup stuff.
4702                          */
4703                         ret = btrfs_check_shared(root,
4704                                                  btrfs_ino(BTRFS_I(inode)),
4705                                                  bytenr, roots, tmp_ulist);
4706                         if (ret < 0)
4707                                 goto out_free;
4708                         if (ret)
4709                                 flags |= FIEMAP_EXTENT_SHARED;
4710                         ret = 0;
4711                 }
4712                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4713                         flags |= FIEMAP_EXTENT_ENCODED;
4714                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4715                         flags |= FIEMAP_EXTENT_UNWRITTEN;
4716
4717                 free_extent_map(em);
4718                 em = NULL;
4719                 if ((em_start >= last) || em_len == (u64)-1 ||
4720                    (last == (u64)-1 && isize <= em_end)) {
4721                         flags |= FIEMAP_EXTENT_LAST;
4722                         end = 1;
4723                 }
4724
4725                 /* now scan forward to see if this is really the last extent. */
4726                 em = get_extent_skip_holes(inode, off, last_for_get_extent);
4727                 if (IS_ERR(em)) {
4728                         ret = PTR_ERR(em);
4729                         goto out;
4730                 }
4731                 if (!em) {
4732                         flags |= FIEMAP_EXTENT_LAST;
4733                         end = 1;
4734                 }
4735                 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4736                                            em_len, flags);
4737                 if (ret) {
4738                         if (ret == 1)
4739                                 ret = 0;
4740                         goto out_free;
4741                 }
4742         }
4743 out_free:
4744         if (!ret)
4745                 ret = emit_last_fiemap_cache(fieinfo, &cache);
4746         free_extent_map(em);
4747 out:
4748         btrfs_free_path(path);
4749         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4750                              &cached_state);
4751
4752 out_free_ulist:
4753         ulist_free(roots);
4754         ulist_free(tmp_ulist);
4755         return ret;
4756 }
4757
4758 static void __free_extent_buffer(struct extent_buffer *eb)
4759 {
4760         btrfs_leak_debug_del(&eb->leak_list);
4761         kmem_cache_free(extent_buffer_cache, eb);
4762 }
4763
4764 int extent_buffer_under_io(struct extent_buffer *eb)
4765 {
4766         return (atomic_read(&eb->io_pages) ||
4767                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4768                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4769 }
4770
4771 /*
4772  * Release all pages attached to the extent buffer.
4773  */
4774 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4775 {
4776         int i;
4777         int num_pages;
4778         int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4779
4780         BUG_ON(extent_buffer_under_io(eb));
4781
4782         num_pages = num_extent_pages(eb);
4783         for (i = 0; i < num_pages; i++) {
4784                 struct page *page = eb->pages[i];
4785
4786                 if (!page)
4787                         continue;
4788                 if (mapped)
4789                         spin_lock(&page->mapping->private_lock);
4790                 /*
4791                  * We do this since we'll remove the pages after we've
4792                  * removed the eb from the radix tree, so we could race
4793                  * and have this page now attached to the new eb.  So
4794                  * only clear page_private if it's still connected to
4795                  * this eb.
4796                  */
4797                 if (PagePrivate(page) &&
4798                     page->private == (unsigned long)eb) {
4799                         BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4800                         BUG_ON(PageDirty(page));
4801                         BUG_ON(PageWriteback(page));
4802                         /*
4803                          * We need to make sure we haven't been attached
4804                          * to a new eb.
4805                          */
4806                         ClearPagePrivate(page);
4807                         set_page_private(page, 0);
4808                         /* One for the page private */
4809                         put_page(page);
4810                 }
4811
4812                 if (mapped)
4813                         spin_unlock(&page->mapping->private_lock);
4814
4815                 /* One for when we allocated the page */
4816                 put_page(page);
4817         }
4818 }
4819
4820 /*
4821  * Helper for releasing the extent buffer.
4822  */
4823 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4824 {
4825         btrfs_release_extent_buffer_pages(eb);
4826         __free_extent_buffer(eb);
4827 }
4828
4829 static struct extent_buffer *
4830 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4831                       unsigned long len)
4832 {
4833         struct extent_buffer *eb = NULL;
4834
4835         eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4836         eb->start = start;
4837         eb->len = len;
4838         eb->fs_info = fs_info;
4839         eb->bflags = 0;
4840         rwlock_init(&eb->lock);
4841         atomic_set(&eb->blocking_readers, 0);
4842         atomic_set(&eb->blocking_writers, 0);
4843         eb->lock_nested = false;
4844         init_waitqueue_head(&eb->write_lock_wq);
4845         init_waitqueue_head(&eb->read_lock_wq);
4846
4847         btrfs_leak_debug_add(&eb->leak_list, &buffers);
4848
4849         spin_lock_init(&eb->refs_lock);
4850         atomic_set(&eb->refs, 1);
4851         atomic_set(&eb->io_pages, 0);
4852
4853         /*
4854          * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4855          */
4856         BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4857                 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4858         BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4859
4860 #ifdef CONFIG_BTRFS_DEBUG
4861         atomic_set(&eb->spinning_writers, 0);
4862         atomic_set(&eb->spinning_readers, 0);
4863         atomic_set(&eb->read_locks, 0);
4864         atomic_set(&eb->write_locks, 0);
4865 #endif
4866
4867         return eb;
4868 }
4869
4870 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4871 {
4872         int i;
4873         struct page *p;
4874         struct extent_buffer *new;
4875         int num_pages = num_extent_pages(src);
4876
4877         new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4878         if (new == NULL)
4879                 return NULL;
4880
4881         for (i = 0; i < num_pages; i++) {
4882                 p = alloc_page(GFP_NOFS);
4883                 if (!p) {
4884                         btrfs_release_extent_buffer(new);
4885                         return NULL;
4886                 }
4887                 attach_extent_buffer_page(new, p);
4888                 WARN_ON(PageDirty(p));
4889                 SetPageUptodate(p);
4890                 new->pages[i] = p;
4891                 copy_page(page_address(p), page_address(src->pages[i]));
4892         }
4893
4894         set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4895         set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
4896
4897         return new;
4898 }
4899
4900 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4901                                                   u64 start, unsigned long len)
4902 {
4903         struct extent_buffer *eb;
4904         int num_pages;
4905         int i;
4906
4907         eb = __alloc_extent_buffer(fs_info, start, len);
4908         if (!eb)
4909                 return NULL;
4910
4911         num_pages = num_extent_pages(eb);
4912         for (i = 0; i < num_pages; i++) {
4913                 eb->pages[i] = alloc_page(GFP_NOFS);
4914                 if (!eb->pages[i])
4915                         goto err;
4916         }
4917         set_extent_buffer_uptodate(eb);
4918         btrfs_set_header_nritems(eb, 0);
4919         set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4920
4921         return eb;
4922 err:
4923         for (; i > 0; i--)
4924                 __free_page(eb->pages[i - 1]);
4925         __free_extent_buffer(eb);
4926         return NULL;
4927 }
4928
4929 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4930                                                 u64 start)
4931 {
4932         return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
4933 }
4934
4935 static void check_buffer_tree_ref(struct extent_buffer *eb)
4936 {
4937         int refs;
4938         /* The ref bit is tricky.  We have to make sure it is set
4939          * if we have the buffer dirty.  Otherwise the
4940          * code to free a buffer can end up dropping a dirty
4941          * page.
4942          *
4943          * Once the ref bit is set, it won't go away while the
4944          * buffer is dirty or in writeback, and it also won't
4945          * go away while we have the reference count on the
4946          * eb bumped.
4947          *
4948          * We can't just set the ref bit without bumping the
4949          * ref on the eb because free_extent_buffer might
4950          * see the ref bit and try to clear it.  If this happens
4951          * free_extent_buffer might end up dropping our original
4952          * ref by mistake and freeing the page before we are able
4953          * to add one more ref.
4954          *
4955          * So bump the ref count first, then set the bit.  If someone
4956          * beat us to it, drop the ref we added.
4957          */
4958         refs = atomic_read(&eb->refs);
4959         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4960                 return;
4961
4962         spin_lock(&eb->refs_lock);
4963         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4964                 atomic_inc(&eb->refs);
4965         spin_unlock(&eb->refs_lock);
4966 }
4967
4968 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4969                 struct page *accessed)
4970 {
4971         int num_pages, i;
4972
4973         check_buffer_tree_ref(eb);
4974
4975         num_pages = num_extent_pages(eb);
4976         for (i = 0; i < num_pages; i++) {
4977                 struct page *p = eb->pages[i];
4978
4979                 if (p != accessed)
4980                         mark_page_accessed(p);
4981         }
4982 }
4983
4984 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4985                                          u64 start)
4986 {
4987         struct extent_buffer *eb;
4988
4989         rcu_read_lock();
4990         eb = radix_tree_lookup(&fs_info->buffer_radix,
4991                                start >> PAGE_SHIFT);
4992         if (eb && atomic_inc_not_zero(&eb->refs)) {
4993                 rcu_read_unlock();
4994                 /*
4995                  * Lock our eb's refs_lock to avoid races with
4996                  * free_extent_buffer. When we get our eb it might be flagged
4997                  * with EXTENT_BUFFER_STALE and another task running
4998                  * free_extent_buffer might have seen that flag set,
4999                  * eb->refs == 2, that the buffer isn't under IO (dirty and
5000                  * writeback flags not set) and it's still in the tree (flag
5001                  * EXTENT_BUFFER_TREE_REF set), therefore being in the process
5002                  * of decrementing the extent buffer's reference count twice.
5003                  * So here we could race and increment the eb's reference count,
5004                  * clear its stale flag, mark it as dirty and drop our reference
5005                  * before the other task finishes executing free_extent_buffer,
5006                  * which would later result in an attempt to free an extent
5007                  * buffer that is dirty.
5008                  */
5009                 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5010                         spin_lock(&eb->refs_lock);
5011                         spin_unlock(&eb->refs_lock);
5012                 }
5013                 mark_extent_buffer_accessed(eb, NULL);
5014                 return eb;
5015         }
5016         rcu_read_unlock();
5017
5018         return NULL;
5019 }
5020
5021 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5022 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
5023                                         u64 start)
5024 {
5025         struct extent_buffer *eb, *exists = NULL;
5026         int ret;
5027
5028         eb = find_extent_buffer(fs_info, start);
5029         if (eb)
5030                 return eb;
5031         eb = alloc_dummy_extent_buffer(fs_info, start);
5032         if (!eb)
5033                 return NULL;
5034         eb->fs_info = fs_info;
5035 again:
5036         ret = radix_tree_preload(GFP_NOFS);
5037         if (ret)
5038                 goto free_eb;
5039         spin_lock(&fs_info->buffer_lock);
5040         ret = radix_tree_insert(&fs_info->buffer_radix,
5041                                 start >> PAGE_SHIFT, eb);
5042         spin_unlock(&fs_info->buffer_lock);
5043         radix_tree_preload_end();
5044         if (ret == -EEXIST) {
5045                 exists = find_extent_buffer(fs_info, start);
5046                 if (exists)
5047                         goto free_eb;
5048                 else
5049                         goto again;
5050         }
5051         check_buffer_tree_ref(eb);
5052         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5053
5054         return eb;
5055 free_eb:
5056         btrfs_release_extent_buffer(eb);
5057         return exists;
5058 }
5059 #endif
5060
5061 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5062                                           u64 start)
5063 {
5064         unsigned long len = fs_info->nodesize;
5065         int num_pages;
5066         int i;
5067         unsigned long index = start >> PAGE_SHIFT;
5068         struct extent_buffer *eb;
5069         struct extent_buffer *exists = NULL;
5070         struct page *p;
5071         struct address_space *mapping = fs_info->btree_inode->i_mapping;
5072         int uptodate = 1;
5073         int ret;
5074
5075         if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5076                 btrfs_err(fs_info, "bad tree block start %llu", start);
5077                 return ERR_PTR(-EINVAL);
5078         }
5079
5080         eb = find_extent_buffer(fs_info, start);
5081         if (eb)
5082                 return eb;
5083
5084         eb = __alloc_extent_buffer(fs_info, start, len);
5085         if (!eb)
5086                 return ERR_PTR(-ENOMEM);
5087
5088         num_pages = num_extent_pages(eb);
5089         for (i = 0; i < num_pages; i++, index++) {
5090                 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
5091                 if (!p) {
5092                         exists = ERR_PTR(-ENOMEM);
5093                         goto free_eb;
5094                 }
5095
5096                 spin_lock(&mapping->private_lock);
5097                 if (PagePrivate(p)) {
5098                         /*
5099                          * We could have already allocated an eb for this page
5100                          * and attached one so let's see if we can get a ref on
5101                          * the existing eb, and if we can we know it's good and
5102                          * we can just return that one, else we know we can just
5103                          * overwrite page->private.
5104                          */
5105                         exists = (struct extent_buffer *)p->private;
5106                         if (atomic_inc_not_zero(&exists->refs)) {
5107                                 spin_unlock(&mapping->private_lock);
5108                                 unlock_page(p);
5109                                 put_page(p);
5110                                 mark_extent_buffer_accessed(exists, p);
5111                                 goto free_eb;
5112                         }
5113                         exists = NULL;
5114
5115                         /*
5116                          * Do this so attach doesn't complain; we also need to
5117                          * drop the ref the old eb had on this page.
5118                          */
5119                         ClearPagePrivate(p);
5120                         WARN_ON(PageDirty(p));
5121                         put_page(p);
5122                 }
5123                 attach_extent_buffer_page(eb, p);
5124                 spin_unlock(&mapping->private_lock);
5125                 WARN_ON(PageDirty(p));
5126                 eb->pages[i] = p;
5127                 if (!PageUptodate(p))
5128                         uptodate = 0;
5129
5130                 /*
5131                  * We can't unlock the pages just yet since the extent buffer
5132                  * hasn't been properly inserted into the radix tree; this
5133                  * opens a race with btree_releasepage, which can free a page
5134                  * while we are still filling in all pages for the buffer and
5135                  * we could crash.
5136                  */
5137         }
5138         if (uptodate)
5139                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5140 again:
5141         ret = radix_tree_preload(GFP_NOFS);
5142         if (ret) {
5143                 exists = ERR_PTR(ret);
5144                 goto free_eb;
5145         }
5146
5147         spin_lock(&fs_info->buffer_lock);
5148         ret = radix_tree_insert(&fs_info->buffer_radix,
5149                                 start >> PAGE_SHIFT, eb);
5150         spin_unlock(&fs_info->buffer_lock);
5151         radix_tree_preload_end();
5152         if (ret == -EEXIST) {
5153                 exists = find_extent_buffer(fs_info, start);
5154                 if (exists)
5155                         goto free_eb;
5156                 else
5157                         goto again;
5158         }
5159         /* add one reference for the tree */
5160         check_buffer_tree_ref(eb);
5161         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5162
5163         /*
5164          * Now it's safe to unlock the pages because any calls to
5165          * btree_releasepage will correctly detect that a page belongs to a
5166          * live buffer and won't free them prematurely.
5167          */
5168         for (i = 0; i < num_pages; i++)
5169                 unlock_page(eb->pages[i]);
5170         return eb;
5171
5172 free_eb:
5173         WARN_ON(!atomic_dec_and_test(&eb->refs));
5174         for (i = 0; i < num_pages; i++) {
5175                 if (eb->pages[i])
5176                         unlock_page(eb->pages[i]);
5177         }
5178
5179         btrfs_release_extent_buffer(eb);
5180         return exists;
5181 }
5182
5183 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5184 {
5185         struct extent_buffer *eb =
5186                         container_of(head, struct extent_buffer, rcu_head);
5187
5188         __free_extent_buffer(eb);
5189 }
5190
5191 static int release_extent_buffer(struct extent_buffer *eb)
5192 {
5193         lockdep_assert_held(&eb->refs_lock);
5194
5195         WARN_ON(atomic_read(&eb->refs) == 0);
5196         if (atomic_dec_and_test(&eb->refs)) {
5197                 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5198                         struct btrfs_fs_info *fs_info = eb->fs_info;
5199
5200                         spin_unlock(&eb->refs_lock);
5201
5202                         spin_lock(&fs_info->buffer_lock);
5203                         radix_tree_delete(&fs_info->buffer_radix,
5204                                           eb->start >> PAGE_SHIFT);
5205                         spin_unlock(&fs_info->buffer_lock);
5206                 } else {
5207                         spin_unlock(&eb->refs_lock);
5208                 }
5209
5210                 /* Should be safe to release our pages at this point */
5211                 btrfs_release_extent_buffer_pages(eb);
5212 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5213                 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
5214                         __free_extent_buffer(eb);
5215                         return 1;
5216                 }
5217 #endif
5218                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5219                 return 1;
5220         }
5221         spin_unlock(&eb->refs_lock);
5222
5223         return 0;
5224 }
5225
5226 void free_extent_buffer(struct extent_buffer *eb)
5227 {
5228         int refs;
5229         int old;
5230         if (!eb)
5231                 return;
5232
5233         while (1) {
5234                 refs = atomic_read(&eb->refs);
5235                 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5236                     || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5237                         refs == 1))
5238                         break;
5239                 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5240                 if (old == refs)
5241                         return;
5242         }
5243
5244         spin_lock(&eb->refs_lock);
5245         if (atomic_read(&eb->refs) == 2 &&
5246             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5247             !extent_buffer_under_io(eb) &&
5248             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5249                 atomic_dec(&eb->refs);
5250
5251         /*
5252          * I know this is terrible, but it's temporary until we stop tracking
5253          * the uptodate bits and such for the extent buffers.
5254          */
5255         release_extent_buffer(eb);
5256 }
5257
5258 void free_extent_buffer_stale(struct extent_buffer *eb)
5259 {
5260         if (!eb)
5261                 return;
5262
5263         spin_lock(&eb->refs_lock);
5264         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5265
5266         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5267             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5268                 atomic_dec(&eb->refs);
5269         release_extent_buffer(eb);
5270 }
5271
5272 void clear_extent_buffer_dirty(struct extent_buffer *eb)
5273 {
5274         int i;
5275         int num_pages;
5276         struct page *page;
5277
5278         num_pages = num_extent_pages(eb);
5279
5280         for (i = 0; i < num_pages; i++) {
5281                 page = eb->pages[i];
5282                 if (!PageDirty(page))
5283                         continue;
5284
5285                 lock_page(page);
5286                 WARN_ON(!PagePrivate(page));
5287
5288                 clear_page_dirty_for_io(page);
5289                 xa_lock_irq(&page->mapping->i_pages);
5290                 if (!PageDirty(page))
5291                         __xa_clear_mark(&page->mapping->i_pages,
5292                                         page_index(page), PAGECACHE_TAG_DIRTY);
5293                 xa_unlock_irq(&page->mapping->i_pages);
5294                 ClearPageError(page);
5295                 unlock_page(page);
5296         }
5297         WARN_ON(atomic_read(&eb->refs) == 0);
5298 }
5299
5300 bool set_extent_buffer_dirty(struct extent_buffer *eb)
5301 {
5302         int i;
5303         int num_pages;
5304         bool was_dirty;
5305
5306         check_buffer_tree_ref(eb);
5307
5308         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5309
5310         num_pages = num_extent_pages(eb);
5311         WARN_ON(atomic_read(&eb->refs) == 0);
5312         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5313
5314         if (!was_dirty)
5315                 for (i = 0; i < num_pages; i++)
5316                         set_page_dirty(eb->pages[i]);
5317
5318 #ifdef CONFIG_BTRFS_DEBUG
5319         for (i = 0; i < num_pages; i++)
5320                 ASSERT(PageDirty(eb->pages[i]));
5321 #endif
5322
5323         return was_dirty;
5324 }
5325
5326 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5327 {
5328         int i;
5329         struct page *page;
5330         int num_pages;
5331
5332         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5333         num_pages = num_extent_pages(eb);
5334         for (i = 0; i < num_pages; i++) {
5335                 page = eb->pages[i];
5336                 if (page)
5337                         ClearPageUptodate(page);
5338         }
5339 }
5340
5341 void set_extent_buffer_uptodate(struct extent_buffer *eb)
5342 {
5343         int i;
5344         struct page *page;
5345         int num_pages;
5346
5347         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5348         num_pages = num_extent_pages(eb);
5349         for (i = 0; i < num_pages; i++) {
5350                 page = eb->pages[i];
5351                 SetPageUptodate(page);
5352         }
5353 }
5354
5355 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
5356 {
5357         int i;
5358         struct page *page;
5359         int err;
5360         int ret = 0;
5361         int locked_pages = 0;
5362         int all_uptodate = 1;
5363         int num_pages;
5364         unsigned long num_reads = 0;
5365         struct bio *bio = NULL;
5366         unsigned long bio_flags = 0;
5367         struct extent_io_tree *tree = &BTRFS_I(eb->fs_info->btree_inode)->io_tree;
5368
5369         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5370                 return 0;
5371
5372         num_pages = num_extent_pages(eb);
5373         for (i = 0; i < num_pages; i++) {
5374                 page = eb->pages[i];
5375                 if (wait == WAIT_NONE) {
5376                         if (!trylock_page(page))
5377                                 goto unlock_exit;
5378                 } else {
5379                         lock_page(page);
5380                 }
5381                 locked_pages++;
5382         }
5383         /*
5384          * We need to lock all pages first to make sure that
5385          * the uptodate bit of our pages won't be affected by
5386          * clear_extent_buffer_uptodate().
5387          */
5388         for (i = 0; i < num_pages; i++) {
5389                 page = eb->pages[i];
5390                 if (!PageUptodate(page)) {
5391                         num_reads++;
5392                         all_uptodate = 0;
5393                 }
5394         }
5395
5396         if (all_uptodate) {
5397                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5398                 goto unlock_exit;
5399         }
5400
5401         clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5402         eb->read_mirror = 0;
5403         atomic_set(&eb->io_pages, num_reads);
5404         for (i = 0; i < num_pages; i++) {
5405                 page = eb->pages[i];
5406
5407                 if (!PageUptodate(page)) {
5408                         if (ret) {
5409                                 atomic_dec(&eb->io_pages);
5410                                 unlock_page(page);
5411                                 continue;
5412                         }
5413
5414                         ClearPageError(page);
5415                         err = __extent_read_full_page(tree, page,
5416                                                       btree_get_extent, &bio,
5417                                                       mirror_num, &bio_flags,
5418                                                       REQ_META);
5419                         if (err) {
5420                                 ret = err;
5421                                 /*
5422                                  * We passed &bio to __extent_read_full_page
5423                                  * above, so if it returns an error the current
5424                                  * page was not added to the bio and has already
5425                                  * been unlocked.
5426                                  *
5427                                  * We must decrement io_pages ourselves.
5428                                  */
5429                                 atomic_dec(&eb->io_pages);
5430                         }
5431                 } else {
5432                         unlock_page(page);
5433                 }
5434         }
5435
5436         if (bio) {
5437                 err = submit_one_bio(bio, mirror_num, bio_flags);
5438                 if (err)
5439                         return err;
5440         }
5441
5442         if (ret || wait != WAIT_COMPLETE)
5443                 return ret;
5444
5445         for (i = 0; i < num_pages; i++) {
5446                 page = eb->pages[i];
5447                 wait_on_page_locked(page);
5448                 if (!PageUptodate(page))
5449                         ret = -EIO;
5450         }
5451
5452         return ret;
5453
5454 unlock_exit:
5455         while (locked_pages > 0) {
5456                 locked_pages--;
5457                 page = eb->pages[locked_pages];
5458                 unlock_page(page);
5459         }
5460         return ret;
5461 }
5462
5463 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5464                         unsigned long start, unsigned long len)
5465 {
5466         size_t cur;
5467         size_t offset;
5468         struct page *page;
5469         char *kaddr;
5470         char *dst = (char *)dstv;
5471         size_t start_offset = offset_in_page(eb->start);
5472         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5473
5474         if (start + len > eb->len) {
5475                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5476                      eb->start, eb->len, start, len);
5477                 memset(dst, 0, len);
5478                 return;
5479         }
5480
5481         offset = offset_in_page(start_offset + start);
5482
5483         while (len > 0) {
5484                 page = eb->pages[i];
5485
5486                 cur = min(len, (PAGE_SIZE - offset));
5487                 kaddr = page_address(page);
5488                 memcpy(dst, kaddr + offset, cur);
5489
5490                 dst += cur;
5491                 len -= cur;
5492                 offset = 0;
5493                 i++;
5494         }
5495 }
5496
5497 int read_extent_buffer_to_user(const struct extent_buffer *eb,
5498                                void __user *dstv,
5499                                unsigned long start, unsigned long len)
5500 {
5501         size_t cur;
5502         size_t offset;
5503         struct page *page;
5504         char *kaddr;
5505         char __user *dst = (char __user *)dstv;
5506         size_t start_offset = offset_in_page(eb->start);
5507         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5508         int ret = 0;
5509
5510         WARN_ON(start > eb->len);
5511         WARN_ON(start + len > eb->start + eb->len);
5512
5513         offset = offset_in_page(start_offset + start);
5514
5515         while (len > 0) {
5516                 page = eb->pages[i];
5517
5518                 cur = min(len, (PAGE_SIZE - offset));
5519                 kaddr = page_address(page);
5520                 if (copy_to_user(dst, kaddr + offset, cur)) {
5521                         ret = -EFAULT;
5522                         break;
5523                 }
5524
5525                 dst += cur;
5526                 len -= cur;
5527                 offset = 0;
5528                 i++;
5529         }
5530
5531         return ret;
5532 }
5533
5534 /*
5535  * return 0 if the item is found within a page.
5536  * return 1 if the item spans two pages.
5537  * return -EINVAL otherwise.
5538  */
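/*
 * Worked example (for illustration, assuming 4K pages and a page aligned
 * eb->start, i.e. start_offset == 0): start == 4000 with min_len == 200
 * gives i == 0 but end_i == 1, because byte 4199 lands on the second page,
 * so the item spans two pages and the function returns 1.  The same call
 * with start == 100 stays on page 0 and returns 0, with *map_start == 0 and
 * *map_len == PAGE_SIZE.
 */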
5539 int map_private_extent_buffer(const struct extent_buffer *eb,
5540                               unsigned long start, unsigned long min_len,
5541                               char **map, unsigned long *map_start,
5542                               unsigned long *map_len)
5543 {
5544         size_t offset;
5545         char *kaddr;
5546         struct page *p;
5547         size_t start_offset = offset_in_page(eb->start);
5548         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5549         unsigned long end_i = (start_offset + start + min_len - 1) >>
5550                 PAGE_SHIFT;
5551
5552         if (start + min_len > eb->len) {
5553                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5554                        eb->start, eb->len, start, min_len);
5555                 return -EINVAL;
5556         }
5557
5558         if (i != end_i)
5559                 return 1;
5560
5561         if (i == 0) {
5562                 offset = start_offset;
5563                 *map_start = 0;
5564         } else {
5565                 offset = 0;
5566                 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5567         }
5568
5569         p = eb->pages[i];
5570         kaddr = page_address(p);
5571         *map = kaddr + offset;
5572         *map_len = PAGE_SIZE - offset;
5573         return 0;
5574 }
5575
5576 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5577                          unsigned long start, unsigned long len)
5578 {
5579         size_t cur;
5580         size_t offset;
5581         struct page *page;
5582         char *kaddr;
5583         char *ptr = (char *)ptrv;
5584         size_t start_offset = offset_in_page(eb->start);
5585         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5586         int ret = 0;
5587
5588         WARN_ON(start > eb->len);
5589         WARN_ON(start + len > eb->start + eb->len);
5590
5591         offset = offset_in_page(start_offset + start);
5592
5593         while (len > 0) {
5594                 page = eb->pages[i];
5595
5596                 cur = min(len, (PAGE_SIZE - offset));
5597
5598                 kaddr = page_address(page);
5599                 ret = memcmp(ptr, kaddr + offset, cur);
5600                 if (ret)
5601                         break;
5602
5603                 ptr += cur;
5604                 len -= cur;
5605                 offset = 0;
5606                 i++;
5607         }
5608         return ret;
5609 }
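
/*
 * Illustrative sketch: memcmp_extent_buffer() compares a caller-supplied
 * buffer against a byte range of the tree block without copying the range
 * out first, e.g. to verify the fsid stored in the header.  The helper name
 * below is hypothetical.
 */
static bool __maybe_unused demo_fsid_matches(const struct extent_buffer *eb,
                                             const u8 *expected_fsid)
{
        return memcmp_extent_buffer(eb, expected_fsid,
                                    offsetof(struct btrfs_header, fsid),
                                    BTRFS_FSID_SIZE) == 0;
}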
5610
5611 void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
5612                 const void *srcv)
5613 {
5614         char *kaddr;
5615
5616         WARN_ON(!PageUptodate(eb->pages[0]));
5617         kaddr = page_address(eb->pages[0]);
5618         memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5619                         BTRFS_FSID_SIZE);
5620 }
5621
5622 void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
5623 {
5624         char *kaddr;
5625
5626         WARN_ON(!PageUptodate(eb->pages[0]));
5627         kaddr = page_address(eb->pages[0]);
5628         memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5629                         BTRFS_FSID_SIZE);
5630 }
5631
5632 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5633                          unsigned long start, unsigned long len)
5634 {
5635         size_t cur;
5636         size_t offset;
5637         struct page *page;
5638         char *kaddr;
5639         char *src = (char *)srcv;
5640         size_t start_offset = offset_in_page(eb->start);
5641         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5642
5643         WARN_ON(start > eb->len);
5644         WARN_ON(start + len > eb->start + eb->len);
5645
5646         offset = offset_in_page(start_offset + start);
5647
5648         while (len > 0) {
5649                 page = eb->pages[i];
5650                 WARN_ON(!PageUptodate(page));
5651
5652                 cur = min(len, PAGE_SIZE - offset);
5653                 kaddr = page_address(page);
5654                 memcpy(kaddr + offset, src, cur);
5655
5656                 src += cur;
5657                 len -= cur;
5658                 offset = 0;
5659                 i++;
5660         }
5661 }
5662
5663 void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
5664                 unsigned long len)
5665 {
5666         size_t cur;
5667         size_t offset;
5668         struct page *page;
5669         char *kaddr;
5670         size_t start_offset = offset_in_page(eb->start);
5671         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5672
5673         WARN_ON(start > eb->len);
5674         WARN_ON(start + len > eb->start + eb->len);
5675
5676         offset = offset_in_page(start_offset + start);
5677
5678         while (len > 0) {
5679                 page = eb->pages[i];
5680                 WARN_ON(!PageUptodate(page));
5681
5682                 cur = min(len, PAGE_SIZE - offset);
5683                 kaddr = page_address(page);
5684                 memset(kaddr + offset, 0, cur);
5685
5686                 len -= cur;
5687                 offset = 0;
5688                 i++;
5689         }
5690 }
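
/*
 * Sketch of a common pairing (hypothetical helper, real callees): write new
 * bytes into a reserved range with write_extent_buffer() and zero whatever
 * is left of that range with memzero_extent_buffer().
 */
static void __maybe_unused demo_write_padded(struct extent_buffer *eb,
                                             unsigned long dst_off,
                                             const void *data,
                                             unsigned long data_len,
                                             unsigned long reserved_len)
{
        write_extent_buffer(eb, data, dst_off, data_len);
        if (reserved_len > data_len)
                memzero_extent_buffer(eb, dst_off + data_len,
                                      reserved_len - data_len);
}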
5691
5692 void copy_extent_buffer_full(struct extent_buffer *dst,
5693                              struct extent_buffer *src)
5694 {
5695         int i;
5696         int num_pages;
5697
5698         ASSERT(dst->len == src->len);
5699
5700         num_pages = num_extent_pages(dst);
5701         for (i = 0; i < num_pages; i++)
5702                 copy_page(page_address(dst->pages[i]),
5703                                 page_address(src->pages[i]));
5704 }
5705
5706 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5707                         unsigned long dst_offset, unsigned long src_offset,
5708                         unsigned long len)
5709 {
5710         u64 dst_len = dst->len;
5711         size_t cur;
5712         size_t offset;
5713         struct page *page;
5714         char *kaddr;
5715         size_t start_offset = offset_in_page(dst->start);
5716         unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5717
5718         WARN_ON(src->len != dst_len);
5719
5720         offset = offset_in_page(start_offset + dst_offset);
5721
5722         while (len > 0) {
5723                 page = dst->pages[i];
5724                 WARN_ON(!PageUptodate(page));
5725
5726                 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5727
5728                 kaddr = page_address(page);
5729                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5730
5731                 src_offset += cur;
5732                 len -= cur;
5733                 offset = 0;
5734                 i++;
5735         }
5736 }
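
/*
 * Illustrative contrast between the two copy helpers (the wrapper below is
 * made up): copy_extent_buffer_full() clones a whole block byte for byte,
 * while the ranged helpers then touch only part of the clone.
 */
static void __maybe_unused demo_clone_and_clear(struct extent_buffer *dst,
                                                struct extent_buffer *src,
                                                unsigned long clear_off,
                                                unsigned long clear_len)
{
        /* Both buffers must have the same ->len, asserted by the callee. */
        copy_extent_buffer_full(dst, src);
        memzero_extent_buffer(dst, clear_off, clear_len);
}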
5737
5738 /**
5739  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5740  * given bit number
5741  * @eb: the extent buffer
5742  * @start: offset of the bitmap item in the extent buffer
5743  * @nr: bit number
5744  * @page_index: return index of the page in the extent buffer that contains the
5745  * given bit number
5746  * @page_offset: return offset into the page given by page_index
5747  *
5748  * This helper hides the ugliness of locating the byte in an extent buffer
5749  * that holds a given bit.
5750  */
5751 static inline void eb_bitmap_offset(struct extent_buffer *eb,
5752                                     unsigned long start, unsigned long nr,
5753                                     unsigned long *page_index,
5754                                     size_t *page_offset)
5755 {
5756         size_t start_offset = offset_in_page(eb->start);
5757         size_t byte_offset = BIT_BYTE(nr);
5758         size_t offset;
5759
5760         /*
5761          * The byte we want is the offset of the extent buffer + the offset of
5762          * the bitmap item in the extent buffer + the offset of the byte in the
5763          * bitmap item.
5764          */
5765         offset = start_offset + start + byte_offset;
5766
5767         *page_index = offset >> PAGE_SHIFT;
5768         *page_offset = offset_in_page(offset);
5769 }
5770
5771 /**
5772  * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5773  * @eb: the extent buffer
5774  * @start: offset of the bitmap item in the extent buffer
5775  * @nr: bit number to test
5776  */
5777 int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5778                            unsigned long nr)
5779 {
5780         u8 *kaddr;
5781         struct page *page;
5782         unsigned long i;
5783         size_t offset;
5784
5785         eb_bitmap_offset(eb, start, nr, &i, &offset);
5786         page = eb->pages[i];
5787         WARN_ON(!PageUptodate(page));
5788         kaddr = page_address(page);
5789         return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5790 }
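
/*
 * Worked example for the bit addressing above, assuming 4K pages and an
 * extent buffer that starts on a page boundary (start_offset == 0): for a
 * bitmap item at byte offset 100 in the block, bit 70 lives in byte
 * 100 + 70 / 8 = 108 of page 0, at position 70 % 8 = 6 within that byte.
 * The wrapper below is purely illustrative.
 */
static bool __maybe_unused demo_bit_is_set(struct extent_buffer *eb,
                                           unsigned long bitmap_off,
                                           unsigned long nr)
{
        return extent_buffer_test_bit(eb, bitmap_off, nr) != 0;
}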
5791
5792 /**
5793  * extent_buffer_bitmap_set - set an area of a bitmap
5794  * @eb: the extent buffer
5795  * @start: offset of the bitmap item in the extent buffer
5796  * @pos: bit number of the first bit
5797  * @len: number of bits to set
5798  */
5799 void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5800                               unsigned long pos, unsigned long len)
5801 {
5802         u8 *kaddr;
5803         struct page *page;
5804         unsigned long i;
5805         size_t offset;
5806         const unsigned int size = pos + len;
5807         int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5808         u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5809
5810         eb_bitmap_offset(eb, start, pos, &i, &offset);
5811         page = eb->pages[i];
5812         WARN_ON(!PageUptodate(page));
5813         kaddr = page_address(page);
5814
5815         while (len >= bits_to_set) {
5816                 kaddr[offset] |= mask_to_set;
5817                 len -= bits_to_set;
5818                 bits_to_set = BITS_PER_BYTE;
5819                 mask_to_set = ~0;
5820                 if (++offset >= PAGE_SIZE && len > 0) {
5821                         offset = 0;
5822                         page = eb->pages[++i];
5823                         WARN_ON(!PageUptodate(page));
5824                         kaddr = page_address(page);
5825                 }
5826         }
5827         if (len) {
5828                 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5829                 kaddr[offset] |= mask_to_set;
5830         }
5831 }
5832
5834 /**
5835  * extent_buffer_bitmap_clear - clear an area of a bitmap
5836  * @eb: the extent buffer
5837  * @start: offset of the bitmap item in the extent buffer
5838  * @pos: bit number of the first bit
5839  * @len: number of bits to clear
5840  */
5841 void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5842                                 unsigned long pos, unsigned long len)
5843 {
5844         u8 *kaddr;
5845         struct page *page;
5846         unsigned long i;
5847         size_t offset;
5848         const unsigned int size = pos + len;
5849         int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5850         u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5851
5852         eb_bitmap_offset(eb, start, pos, &i, &offset);
5853         page = eb->pages[i];
5854         WARN_ON(!PageUptodate(page));
5855         kaddr = page_address(page);
5856
5857         while (len >= bits_to_clear) {
5858                 kaddr[offset] &= ~mask_to_clear;
5859                 len -= bits_to_clear;
5860                 bits_to_clear = BITS_PER_BYTE;
5861                 mask_to_clear = ~0;
5862                 if (++offset >= PAGE_SIZE && len > 0) {
5863                         offset = 0;
5864                         page = eb->pages[++i];
5865                         WARN_ON(!PageUptodate(page));
5866                         kaddr = page_address(page);
5867                 }
5868         }
5869         if (len) {
5870                 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5871                 kaddr[offset] &= ~mask_to_clear;
5872         }
5873 }
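
/*
 * Sketch of how the set/clear helpers are typically used together, e.g. to
 * flip a run of bits in a free space bitmap item.  The wrapper and its
 * "in_use" flag are hypothetical; only the two helpers above are real.
 */
static void __maybe_unused demo_mark_range(struct extent_buffer *eb,
                                           unsigned long bitmap_off,
                                           unsigned long first_bit,
                                           unsigned long nr_bits,
                                           bool in_use)
{
        if (in_use)
                extent_buffer_bitmap_set(eb, bitmap_off, first_bit, nr_bits);
        else
                extent_buffer_bitmap_clear(eb, bitmap_off, first_bit, nr_bits);
}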
5874
5875 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5876 {
5877         unsigned long distance = (src > dst) ? src - dst : dst - src;
5878         return distance < len;
5879 }
5880
5881 static void copy_pages(struct page *dst_page, struct page *src_page,
5882                        unsigned long dst_off, unsigned long src_off,
5883                        unsigned long len)
5884 {
5885         char *dst_kaddr = page_address(dst_page);
5886         char *src_kaddr;
5887         int must_memmove = 0;
5888
5889         if (dst_page != src_page) {
5890                 src_kaddr = page_address(src_page);
5891         } else {
5892                 src_kaddr = dst_kaddr;
5893                 if (areas_overlap(src_off, dst_off, len))
5894                         must_memmove = 1;
5895         }
5896
5897         if (must_memmove)
5898                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5899         else
5900                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5901 }
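
/*
 * Concrete illustration of the overlap rule above: two ranges of length
 * @len overlap exactly when their start offsets are less than @len apart.
 * For example (src_off 100, dst_off 164, len 128) -> distance 64 < 128, so
 * memmove() is required; with len 64 a plain memcpy() would be safe.  The
 * wrapper name below is made up.
 */
static bool __maybe_unused demo_needs_memmove(unsigned long src_off,
                                              unsigned long dst_off,
                                              unsigned long len)
{
        return areas_overlap(src_off, dst_off, len);
}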
5902
5903 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5904                            unsigned long src_offset, unsigned long len)
5905 {
5906         struct btrfs_fs_info *fs_info = dst->fs_info;
5907         size_t cur;
5908         size_t dst_off_in_page;
5909         size_t src_off_in_page;
5910         size_t start_offset = offset_in_page(dst->start);
5911         unsigned long dst_i;
5912         unsigned long src_i;
5913
5914         if (src_offset + len > dst->len) {
5915                 btrfs_err(fs_info,
5916                         "memcpy bogus src_offset %lu copy len %lu dst len %lu",
5917                          src_offset, len, dst->len);
5918                 BUG();
5919         }
5920         if (dst_offset + len > dst->len) {
5921                 btrfs_err(fs_info,
5922                         "memcpy bogus dst_offset %lu copy len %lu dst len %lu",
5923                          dst_offset, len, dst->len);
5924                 BUG();
5925         }
5926
5927         while (len > 0) {
5928                 dst_off_in_page = offset_in_page(start_offset + dst_offset);
5929                 src_off_in_page = offset_in_page(start_offset + src_offset);
5930
5931                 dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5932                 src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5933
5934                 cur = min(len, (unsigned long)(PAGE_SIZE -
5935                                                src_off_in_page));
5936                 cur = min_t(unsigned long, cur,
5937                         (unsigned long)(PAGE_SIZE - dst_off_in_page));
5938
5939                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5940                            dst_off_in_page, src_off_in_page, cur);
5941
5942                 src_offset += cur;
5943                 dst_offset += cur;
5944                 len -= cur;
5945         }
5946 }
5947
5948 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5949                            unsigned long src_offset, unsigned long len)
5950 {
5951         struct btrfs_fs_info *fs_info = dst->fs_info;
5952         size_t cur;
5953         size_t dst_off_in_page;
5954         size_t src_off_in_page;
5955         unsigned long dst_end = dst_offset + len - 1;
5956         unsigned long src_end = src_offset + len - 1;
5957         size_t start_offset = offset_in_page(dst->start);
5958         unsigned long dst_i;
5959         unsigned long src_i;
5960
5961         if (src_offset + len > dst->len) {
5962                 btrfs_err(fs_info,
5963                           "memmove bogus src_offset %lu move len %lu dst len %lu",
5964                           src_offset, len, dst->len);
5965                 BUG();
5966         }
5967         if (dst_offset + len > dst->len) {
5968                 btrfs_err(fs_info,
5969                           "memmove bogus dst_offset %lu move len %lu dst len %lu",
5970                           dst_offset, len, dst->len);
5971                 BUG();
5972         }
5973         if (dst_offset < src_offset) {
5974                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5975                 return;
5976         }
5977         while (len > 0) {
5978                 dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5979                 src_i = (start_offset + src_end) >> PAGE_SHIFT;
5980
5981                 dst_off_in_page = offset_in_page(start_offset + dst_end);
5982                 src_off_in_page = offset_in_page(start_offset + src_end);
5983
5984                 cur = min_t(unsigned long, len, src_off_in_page + 1);
5985                 cur = min(cur, dst_off_in_page + 1);
5986                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5987                            dst_off_in_page - cur + 1,
5988                            src_off_in_page - cur + 1, cur);
5989
5990                 dst_end -= cur;
5991                 src_end -= cur;
5992                 len -= cur;
5993         }
5994 }
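
/*
 * Sketch of why both ranged copy variants exist: shifting a region within
 * the same tree block usually overlaps itself, so memmove_extent_buffer()
 * is the safe choice; memcpy_extent_buffer() is only fine when the caller
 * knows the ranges are disjoint.  The helper below is illustrative only.
 */
static void __maybe_unused demo_shift_right(struct extent_buffer *eb,
                                            unsigned long off,
                                            unsigned long nr_bytes,
                                            unsigned long shift)
{
        /* Dst [off + shift, ...) overlaps src [off, ...) when shift < nr_bytes. */
        memmove_extent_buffer(eb, off + shift, off, nr_bytes);
}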
5995
5996 int try_release_extent_buffer(struct page *page)
5997 {
5998         struct extent_buffer *eb;
5999
6000         /*
6001          * We need to make sure nobody is attaching this page to an eb right
6002          * now.
6003          */
6004         spin_lock(&page->mapping->private_lock);
6005         if (!PagePrivate(page)) {
6006                 spin_unlock(&page->mapping->private_lock);
6007                 return 1;
6008         }
6009
6010         eb = (struct extent_buffer *)page->private;
6011         BUG_ON(!eb);
6012
6013         /*
6014          * This is a little awkward but should be OK: we need to make sure
6015          * that the eb doesn't disappear out from under us while we're
6016          * looking at this page.
6017          */
6018         spin_lock(&eb->refs_lock);
6019         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
6020                 spin_unlock(&eb->refs_lock);
6021                 spin_unlock(&page->mapping->private_lock);
6022                 return 0;
6023         }
6024         spin_unlock(&page->mapping->private_lock);
6025
6026         /*
6027          * If the tree ref isn't set then we know the ref on this eb is a real
6028          * ref, so just return; this page will likely be freed soon anyway.
6029          */
6030         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6031                 spin_unlock(&eb->refs_lock);
6032                 return 0;
6033         }
6034
6035         return release_extent_buffer(eb);
6036 }