fs/btrfs/scrub.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
4  */
5
6 #include <linux/blkdev.h>
7 #include <linux/ratelimit.h>
8 #include <linux/sched/mm.h>
9 #include <crypto/hash.h>
10 #include "ctree.h"
11 #include "discard.h"
12 #include "volumes.h"
13 #include "disk-io.h"
14 #include "ordered-data.h"
15 #include "transaction.h"
16 #include "backref.h"
17 #include "extent_io.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
20 #include "rcu-string.h"
21 #include "raid56.h"
22 #include "block-group.h"
23
24 /*
25  * This is only the first step towards a full-featured scrub. It reads all
26  * extents and super blocks and verifies the checksums. In case a bad checksum
27  * is found or the extent cannot be read, good data will be written back if
28  * any can be found.
29  *
30  * Future enhancements:
31  *  - In case an unrepairable extent is encountered, track which files are
32  *    affected and report them
33  *  - track and record media errors, throw out bad devices
34  *  - add a mode to also read unallocated space
35  */
36
37 struct scrub_block;
38 struct scrub_ctx;
39
40 /*
41  * The following three values only influence performance.
42  * The last one configures the number of parallel and outstanding I/O
43  * operations. The first two values configure an upper limit for the number
44  * of (dynamically allocated) pages that are added to a bio.
45  */
46 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
47 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
48 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
49
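/*
 * Illustrative arithmetic (a sketch added for clarity, assuming 4KiB pages;
 * on architectures with larger pages the byte values scale accordingly):
 *
 *   SCRUB_PAGES_PER_RD_BIO * PAGE_SIZE = 32 * 4KiB   = 128KiB per read bio
 *   SCRUB_PAGES_PER_WR_BIO * PAGE_SIZE = 32 * 4KiB   = 128KiB per write bio
 *   SCRUB_BIOS_PER_SCTX * 128KiB       = 64 * 128KiB = 8MiB in flight per device
 *
 * which is where the per-define comments above come from.
 */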
50 /*
51  * The following value times PAGE_SIZE needs to be large enough to match the
52  * largest node/leaf/sector size that shall be supported.
53  * Values larger than BTRFS_STRIPE_LEN are not supported.
54  */
55 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
56
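/*
 * A minimal compile-time sanity check one could place here (illustrative
 * sketch only, assuming static_assert() from <linux/build_bug.h> is already
 * available through the includes above): the per-block page array must be
 * able to cover the largest supported metadata block of 64KiB.
 */
static_assert(SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE >= 64 * 1024);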
57 struct scrub_recover {
58         refcount_t              refs;
59         struct btrfs_bio        *bbio;
60         u64                     map_length;
61 };
62
63 struct scrub_page {
64         struct scrub_block      *sblock;
65         struct page             *page;
66         struct btrfs_device     *dev;
67         struct list_head        list;
68         u64                     flags;  /* extent flags */
69         u64                     generation;
70         u64                     logical;
71         u64                     physical;
72         u64                     physical_for_dev_replace;
73         atomic_t                refs;
74         struct {
75                 unsigned int    mirror_num:8;
76                 unsigned int    have_csum:1;
77                 unsigned int    io_error:1;
78         };
79         u8                      csum[BTRFS_CSUM_SIZE];
80
81         struct scrub_recover    *recover;
82 };
83
84 struct scrub_bio {
85         int                     index;
86         struct scrub_ctx        *sctx;
87         struct btrfs_device     *dev;
88         struct bio              *bio;
89         blk_status_t            status;
90         u64                     logical;
91         u64                     physical;
92 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
93         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
94 #else
95         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
96 #endif
97         int                     page_count;
98         int                     next_free;
99         struct btrfs_work       work;
100 };
101
102 struct scrub_block {
103         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
104         int                     page_count;
105         atomic_t                outstanding_pages;
106         refcount_t              refs; /* free mem on transition to zero */
107         struct scrub_ctx        *sctx;
108         struct scrub_parity     *sparity;
109         struct {
110                 unsigned int    header_error:1;
111                 unsigned int    checksum_error:1;
112                 unsigned int    no_io_error_seen:1;
113                 unsigned int    generation_error:1; /* also sets header_error */
114
115                 /* The following is for the data used to check parity */
116                 /* (i.e. data that has a checksum) */
117                 unsigned int    data_corrected:1;
118         };
119         struct btrfs_work       work;
120 };
121
122 /* Used for the chunks with parity stripe such as RAID5/6 */
123 struct scrub_parity {
124         struct scrub_ctx        *sctx;
125
126         struct btrfs_device     *scrub_dev;
127
128         u64                     logic_start;
129
130         u64                     logic_end;
131
132         int                     nsectors;
133
134         u64                     stripe_len;
135
136         refcount_t              refs;
137
138         struct list_head        spages;
139
140         /* Work of parity check and repair */
141         struct btrfs_work       work;
142
143         /* Mark the parity blocks which have data */
144         unsigned long           *dbitmap;
145
146         /*
147          * Mark the parity blocks which have data, but where errors happened
148          * when reading or checking that data
149          */
150         unsigned long           *ebitmap;
151
152         unsigned long           bitmap[];
153 };
154
155 struct scrub_ctx {
156         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
157         struct btrfs_fs_info    *fs_info;
158         int                     first_free;
159         int                     curr;
160         atomic_t                bios_in_flight;
161         atomic_t                workers_pending;
162         spinlock_t              list_lock;
163         wait_queue_head_t       list_wait;
164         u16                     csum_size;
165         struct list_head        csum_list;
166         atomic_t                cancel_req;
167         int                     readonly;
168         int                     pages_per_rd_bio;
169
170         int                     is_dev_replace;
171
172         struct scrub_bio        *wr_curr_bio;
173         struct mutex            wr_lock;
174         int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
175         struct btrfs_device     *wr_tgtdev;
176         bool                    flush_all_writes;
177
178         /*
179          * statistics
180          */
181         struct btrfs_scrub_progress stat;
182         spinlock_t              stat_lock;
183
184         /*
185          * Use a ref counter to avoid use-after-free issues. Scrub workers
186          * decrement bios_in_flight and workers_pending and then do a wakeup
187          * on the list_wait wait queue. We must ensure the main scrub task
188          * doesn't free the scrub context before or while the workers are
189          * doing the wakeup() call.
190          */
191         refcount_t              refs;
192 };
193
194 struct scrub_warning {
195         struct btrfs_path       *path;
196         u64                     extent_item_size;
197         const char              *errstr;
198         u64                     physical;
199         u64                     logical;
200         struct btrfs_device     *dev;
201 };
202
203 struct full_stripe_lock {
204         struct rb_node node;
205         u64 logical;
206         u64 refs;
207         struct mutex mutex;
208 };
209
210 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
211 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
212 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
213 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
214                                      struct scrub_block *sblocks_for_recheck);
215 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
216                                 struct scrub_block *sblock,
217                                 int retry_failed_mirror);
218 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
219 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
220                                              struct scrub_block *sblock_good);
221 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
222                                             struct scrub_block *sblock_good,
223                                             int page_num, int force_write);
224 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
225 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
226                                            int page_num);
227 static int scrub_checksum_data(struct scrub_block *sblock);
228 static int scrub_checksum_tree_block(struct scrub_block *sblock);
229 static int scrub_checksum_super(struct scrub_block *sblock);
230 static void scrub_block_get(struct scrub_block *sblock);
231 static void scrub_block_put(struct scrub_block *sblock);
232 static void scrub_page_get(struct scrub_page *spage);
233 static void scrub_page_put(struct scrub_page *spage);
234 static void scrub_parity_get(struct scrub_parity *sparity);
235 static void scrub_parity_put(struct scrub_parity *sparity);
236 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
237                                     struct scrub_page *spage);
238 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
239                        u64 physical, struct btrfs_device *dev, u64 flags,
240                        u64 gen, int mirror_num, u8 *csum, int force,
241                        u64 physical_for_dev_replace);
242 static void scrub_bio_end_io(struct bio *bio);
243 static void scrub_bio_end_io_worker(struct btrfs_work *work);
244 static void scrub_block_complete(struct scrub_block *sblock);
245 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
246                                u64 extent_logical, u64 extent_len,
247                                u64 *extent_physical,
248                                struct btrfs_device **extent_dev,
249                                int *extent_mirror_num);
250 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
251                                     struct scrub_page *spage);
252 static void scrub_wr_submit(struct scrub_ctx *sctx);
253 static void scrub_wr_bio_end_io(struct bio *bio);
254 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
255 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
256 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
257 static void scrub_put_ctx(struct scrub_ctx *sctx);
258
259 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
260 {
261         return page->recover &&
262                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
263 }
264
265 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
266 {
267         refcount_inc(&sctx->refs);
268         atomic_inc(&sctx->bios_in_flight);
269 }
270
271 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
272 {
273         atomic_dec(&sctx->bios_in_flight);
274         wake_up(&sctx->list_wait);
275         scrub_put_ctx(sctx);
276 }
277
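/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how the accounting above is typically paired. A submitter calls
 * scrub_pending_bio_inc() before handing a bio to the block layer, the
 * completion path calls scrub_pending_bio_dec(), and the main scrub task can
 * then wait for the in-flight count to drain like this:
 */
static inline void scrub_wait_for_pending_bios(struct scrub_ctx *sctx)
{
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
}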
278 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
279 {
280         while (atomic_read(&fs_info->scrub_pause_req)) {
281                 mutex_unlock(&fs_info->scrub_lock);
282                 wait_event(fs_info->scrub_pause_wait,
283                    atomic_read(&fs_info->scrub_pause_req) == 0);
284                 mutex_lock(&fs_info->scrub_lock);
285         }
286 }
287
288 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
289 {
290         atomic_inc(&fs_info->scrubs_paused);
291         wake_up(&fs_info->scrub_pause_wait);
292 }
293
294 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
295 {
296         mutex_lock(&fs_info->scrub_lock);
297         __scrub_blocked_if_needed(fs_info);
298         atomic_dec(&fs_info->scrubs_paused);
299         mutex_unlock(&fs_info->scrub_lock);
300
301         wake_up(&fs_info->scrub_pause_wait);
302 }
303
304 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
305 {
306         scrub_pause_on(fs_info);
307         scrub_pause_off(fs_info);
308 }
309
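/*
 * Usage note (added for clarity): scrub_pause_on() and scrub_pause_off()
 * bracket sections where the scrub must yield to a pause request, e.g.
 *
 *	scrub_pause_on(fs_info);
 *	... work that may block, such as waiting for outstanding I/O ...
 *	scrub_pause_off(fs_info);
 *
 * scrub_pause_on() announces this scrub as paused so the pausing task can
 * make progress; scrub_pause_off() waits until the pause request is gone
 * before marking the scrub as running again.
 */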
310 /*
311  * Insert new full stripe lock into full stripe locks tree
312  *
313  * Return pointer to existing or newly inserted full_stripe_lock structure if
314  * everything works well.
315  * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
316  *
317  * NOTE: caller must hold full_stripe_locks_root->lock before calling this
318  * function
319  */
320 static struct full_stripe_lock *insert_full_stripe_lock(
321                 struct btrfs_full_stripe_locks_tree *locks_root,
322                 u64 fstripe_logical)
323 {
324         struct rb_node **p;
325         struct rb_node *parent = NULL;
326         struct full_stripe_lock *entry;
327         struct full_stripe_lock *ret;
328
329         lockdep_assert_held(&locks_root->lock);
330
331         p = &locks_root->root.rb_node;
332         while (*p) {
333                 parent = *p;
334                 entry = rb_entry(parent, struct full_stripe_lock, node);
335                 if (fstripe_logical < entry->logical) {
336                         p = &(*p)->rb_left;
337                 } else if (fstripe_logical > entry->logical) {
338                         p = &(*p)->rb_right;
339                 } else {
340                         entry->refs++;
341                         return entry;
342                 }
343         }
344
345         /*
346          * Insert new lock.
347          */
348         ret = kmalloc(sizeof(*ret), GFP_KERNEL);
349         if (!ret)
350                 return ERR_PTR(-ENOMEM);
351         ret->logical = fstripe_logical;
352         ret->refs = 1;
353         mutex_init(&ret->mutex);
354
355         rb_link_node(&ret->node, parent, p);
356         rb_insert_color(&ret->node, &locks_root->root);
357         return ret;
358 }
359
360 /*
361  * Search for a full stripe lock of a block group
362  *
363  * Return pointer to existing full stripe lock if found
364  * Return NULL if not found
365  */
366 static struct full_stripe_lock *search_full_stripe_lock(
367                 struct btrfs_full_stripe_locks_tree *locks_root,
368                 u64 fstripe_logical)
369 {
370         struct rb_node *node;
371         struct full_stripe_lock *entry;
372
373         lockdep_assert_held(&locks_root->lock);
374
375         node = locks_root->root.rb_node;
376         while (node) {
377                 entry = rb_entry(node, struct full_stripe_lock, node);
378                 if (fstripe_logical < entry->logical)
379                         node = node->rb_left;
380                 else if (fstripe_logical > entry->logical)
381                         node = node->rb_right;
382                 else
383                         return entry;
384         }
385         return NULL;
386 }
387
388 /*
389  * Helper to get full stripe logical from a normal bytenr.
390  *
391  * Caller must ensure @cache is a RAID56 block group.
392  */
393 static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
394 {
395         u64 ret;
396
397         /*
398          * Due to the chunk item size limit, the full stripe length should
399          * not be larger than U32_MAX. Just a sanity check here.
400          */
401         WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
402
403         /*
404          * round_down() can only handle powers of 2, while a RAID56 full
405          * stripe length can be 64KiB * n, so we need to round down manually.
406          */
407         ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
408                         cache->full_stripe_len + cache->start;
409         return ret;
410 }
411
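/*
 * Worked example (hypothetical numbers, added for illustration): for a
 * 4-device RAID5 chunk there are three 64KiB data stripes per full stripe,
 * so full_stripe_len = 192KiB, which is not a power of two. With
 * cache->start = 1MiB and bytenr = 1MiB + 400KiB:
 *
 *   (bytenr - start) / full_stripe_len = 400KiB / 192KiB = 2   (integer div)
 *   2 * 192KiB + 1MiB                  = 1MiB + 384KiB
 *
 * i.e. the start of the third full stripe, which round_down() alone could
 * not have produced.
 */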
412 /*
413  * Lock a full stripe to avoid concurrent recovery and read
414  *
415  * It's only used for profiles with parity (RAID5/6); for other profiles it
416  * does nothing.
417  *
418  * Return 0 if we locked the full stripe covering @bytenr, with a mutex held;
419  * the caller must then call unlock_full_stripe() in the same context.
420  *
421  * Return <0 if an error is encountered.
422  */
423 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
424                             bool *locked_ret)
425 {
426         struct btrfs_block_group *bg_cache;
427         struct btrfs_full_stripe_locks_tree *locks_root;
428         struct full_stripe_lock *existing;
429         u64 fstripe_start;
430         int ret = 0;
431
432         *locked_ret = false;
433         bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
434         if (!bg_cache) {
435                 ASSERT(0);
436                 return -ENOENT;
437         }
438
439         /* Profiles not based on parity don't need full stripe lock */
440         if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
441                 goto out;
442         locks_root = &bg_cache->full_stripe_locks_root;
443
444         fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
445
446         /* Now insert the full stripe lock */
447         mutex_lock(&locks_root->lock);
448         existing = insert_full_stripe_lock(locks_root, fstripe_start);
449         mutex_unlock(&locks_root->lock);
450         if (IS_ERR(existing)) {
451                 ret = PTR_ERR(existing);
452                 goto out;
453         }
454         mutex_lock(&existing->mutex);
455         *locked_ret = true;
456 out:
457         btrfs_put_block_group(bg_cache);
458         return ret;
459 }
460
461 /*
462  * Unlock a full stripe.
463  *
464  * NOTE: Caller must ensure it's in the same context as the corresponding
465  * lock_full_stripe().
466  *
467  * Return 0 if we unlock the full stripe without problems.
468  * Return <0 on error
469  */
470 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
471                               bool locked)
472 {
473         struct btrfs_block_group *bg_cache;
474         struct btrfs_full_stripe_locks_tree *locks_root;
475         struct full_stripe_lock *fstripe_lock;
476         u64 fstripe_start;
477         bool freeit = false;
478         int ret = 0;
479
480         /* If we didn't acquire full stripe lock, no need to continue */
481         if (!locked)
482                 return 0;
483
484         bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
485         if (!bg_cache) {
486                 ASSERT(0);
487                 return -ENOENT;
488         }
489         if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
490                 goto out;
491
492         locks_root = &bg_cache->full_stripe_locks_root;
493         fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
494
495         mutex_lock(&locks_root->lock);
496         fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
497         /* Unpaired unlock_full_stripe() detected */
498         if (!fstripe_lock) {
499                 WARN_ON(1);
500                 ret = -ENOENT;
501                 mutex_unlock(&locks_root->lock);
502                 goto out;
503         }
504
505         if (fstripe_lock->refs == 0) {
506                 WARN_ON(1);
507                 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
508                         fstripe_lock->logical);
509         } else {
510                 fstripe_lock->refs--;
511         }
512
513         if (fstripe_lock->refs == 0) {
514                 rb_erase(&fstripe_lock->node, &locks_root->root);
515                 freeit = true;
516         }
517         mutex_unlock(&locks_root->lock);
518
519         mutex_unlock(&fstripe_lock->mutex);
520         if (freeit)
521                 kfree(fstripe_lock);
522 out:
523         btrfs_put_block_group(bg_cache);
524         return ret;
525 }
526
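/*
 * Illustrative pairing sketch (hypothetical caller, not part of the original
 * file); scrub_handle_errored_block() below follows the same pattern:
 */
static inline int scrub_full_stripe_locked_example(struct btrfs_fs_info *fs_info,
						   u64 logical)
{
	bool locked = false;
	int ret;

	ret = lock_full_stripe(fs_info, logical, &locked);
	if (ret < 0)
		return ret;

	/* ... recover or repair data inside the locked full stripe ... */

	return unlock_full_stripe(fs_info, logical, locked);
}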
527 static void scrub_free_csums(struct scrub_ctx *sctx)
528 {
529         while (!list_empty(&sctx->csum_list)) {
530                 struct btrfs_ordered_sum *sum;
531                 sum = list_first_entry(&sctx->csum_list,
532                                        struct btrfs_ordered_sum, list);
533                 list_del(&sum->list);
534                 kfree(sum);
535         }
536 }
537
538 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
539 {
540         int i;
541
542         if (!sctx)
543                 return;
544
545         /* this can happen when scrub is cancelled */
546         if (sctx->curr != -1) {
547                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
548
549                 for (i = 0; i < sbio->page_count; i++) {
550                         WARN_ON(!sbio->pagev[i]->page);
551                         scrub_block_put(sbio->pagev[i]->sblock);
552                 }
553                 bio_put(sbio->bio);
554         }
555
556         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
557                 struct scrub_bio *sbio = sctx->bios[i];
558
559                 if (!sbio)
560                         break;
561                 kfree(sbio);
562         }
563
564         kfree(sctx->wr_curr_bio);
565         scrub_free_csums(sctx);
566         kfree(sctx);
567 }
568
569 static void scrub_put_ctx(struct scrub_ctx *sctx)
570 {
571         if (refcount_dec_and_test(&sctx->refs))
572                 scrub_free_ctx(sctx);
573 }
574
575 static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
576                 struct btrfs_fs_info *fs_info, int is_dev_replace)
577 {
578         struct scrub_ctx *sctx;
579         int             i;
580
581         sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
582         if (!sctx)
583                 goto nomem;
584         refcount_set(&sctx->refs, 1);
585         sctx->is_dev_replace = is_dev_replace;
586         sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
587         sctx->curr = -1;
588         sctx->fs_info = fs_info;
589         INIT_LIST_HEAD(&sctx->csum_list);
590         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
591                 struct scrub_bio *sbio;
592
593                 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
594                 if (!sbio)
595                         goto nomem;
596                 sctx->bios[i] = sbio;
597
598                 sbio->index = i;
599                 sbio->sctx = sctx;
600                 sbio->page_count = 0;
601                 btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
602                                 NULL);
603
604                 if (i != SCRUB_BIOS_PER_SCTX - 1)
605                         sctx->bios[i]->next_free = i + 1;
606                 else
607                         sctx->bios[i]->next_free = -1;
608         }
609         sctx->first_free = 0;
610         atomic_set(&sctx->bios_in_flight, 0);
611         atomic_set(&sctx->workers_pending, 0);
612         atomic_set(&sctx->cancel_req, 0);
613         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
614
615         spin_lock_init(&sctx->list_lock);
616         spin_lock_init(&sctx->stat_lock);
617         init_waitqueue_head(&sctx->list_wait);
618
619         WARN_ON(sctx->wr_curr_bio != NULL);
620         mutex_init(&sctx->wr_lock);
621         sctx->wr_curr_bio = NULL;
622         if (is_dev_replace) {
623                 WARN_ON(!fs_info->dev_replace.tgtdev);
624                 sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
625                 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
626                 sctx->flush_all_writes = false;
627         }
628
629         return sctx;
630
631 nomem:
632         scrub_free_ctx(sctx);
633         return ERR_PTR(-ENOMEM);
634 }
635
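/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * scrub_setup_ctx() returns an ERR_PTR() on failure and the context is
 * released via its refcount, so a caller typically does:
 *
 *	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
 *	if (IS_ERR(sctx))
 *		return PTR_ERR(sctx);
 *	... run the scrub using sctx ...
 *	scrub_put_ctx(sctx);
 */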
636 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
637                                      void *warn_ctx)
638 {
639         u64 isize;
640         u32 nlink;
641         int ret;
642         int i;
643         unsigned nofs_flag;
644         struct extent_buffer *eb;
645         struct btrfs_inode_item *inode_item;
646         struct scrub_warning *swarn = warn_ctx;
647         struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
648         struct inode_fs_paths *ipath = NULL;
649         struct btrfs_root *local_root;
650         struct btrfs_key root_key;
651         struct btrfs_key key;
652
653         root_key.objectid = root;
654         root_key.type = BTRFS_ROOT_ITEM_KEY;
655         root_key.offset = (u64)-1;
656         local_root = btrfs_get_fs_root(fs_info, &root_key, true);
657         if (IS_ERR(local_root)) {
658                 ret = PTR_ERR(local_root);
659                 goto err;
660         }
661
662         /*
663          * this makes the path point to (inum INODE_ITEM ioff)
664          */
665         key.objectid = inum;
666         key.type = BTRFS_INODE_ITEM_KEY;
667         key.offset = 0;
668
669         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
670         if (ret) {
671                 btrfs_put_root(local_root);
672                 btrfs_release_path(swarn->path);
673                 goto err;
674         }
675
676         eb = swarn->path->nodes[0];
677         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
678                                         struct btrfs_inode_item);
679         isize = btrfs_inode_size(eb, inode_item);
680         nlink = btrfs_inode_nlink(eb, inode_item);
681         btrfs_release_path(swarn->path);
682
683         /*
684          * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
685          * uses GFP_NOFS in this context, so we keep it consistent but it does
686          * not seem to be strictly necessary.
687          */
688         nofs_flag = memalloc_nofs_save();
689         ipath = init_ipath(4096, local_root, swarn->path);
690         memalloc_nofs_restore(nofs_flag);
691         if (IS_ERR(ipath)) {
692                 btrfs_put_root(local_root);
693                 ret = PTR_ERR(ipath);
694                 ipath = NULL;
695                 goto err;
696         }
697         ret = paths_from_inode(inum, ipath);
698
699         if (ret < 0)
700                 goto err;
701
702         /*
703          * we deliberately ignore the fact that ipath might have been too
704          * small to hold all of the paths here
705          */
706         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
707                 btrfs_warn_in_rcu(fs_info,
708 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
709                                   swarn->errstr, swarn->logical,
710                                   rcu_str_deref(swarn->dev->name),
711                                   swarn->physical,
712                                   root, inum, offset,
713                                   min(isize - offset, (u64)PAGE_SIZE), nlink,
714                                   (char *)(unsigned long)ipath->fspath->val[i]);
715
716         btrfs_put_root(local_root);
717         free_ipath(ipath);
718         return 0;
719
720 err:
721         btrfs_warn_in_rcu(fs_info,
722                           "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
723                           swarn->errstr, swarn->logical,
724                           rcu_str_deref(swarn->dev->name),
725                           swarn->physical,
726                           root, inum, offset, ret);
727
728         free_ipath(ipath);
729         return 0;
730 }
731
732 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
733 {
734         struct btrfs_device *dev;
735         struct btrfs_fs_info *fs_info;
736         struct btrfs_path *path;
737         struct btrfs_key found_key;
738         struct extent_buffer *eb;
739         struct btrfs_extent_item *ei;
740         struct scrub_warning swarn;
741         unsigned long ptr = 0;
742         u64 extent_item_pos;
743         u64 flags = 0;
744         u64 ref_root;
745         u32 item_size;
746         u8 ref_level = 0;
747         int ret;
748
749         WARN_ON(sblock->page_count < 1);
750         dev = sblock->pagev[0]->dev;
751         fs_info = sblock->sctx->fs_info;
752
753         path = btrfs_alloc_path();
754         if (!path)
755                 return;
756
757         swarn.physical = sblock->pagev[0]->physical;
758         swarn.logical = sblock->pagev[0]->logical;
759         swarn.errstr = errstr;
760         swarn.dev = NULL;
761
762         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
763                                   &flags);
764         if (ret < 0)
765                 goto out;
766
767         extent_item_pos = swarn.logical - found_key.objectid;
768         swarn.extent_item_size = found_key.offset;
769
770         eb = path->nodes[0];
771         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
772         item_size = btrfs_item_size_nr(eb, path->slots[0]);
773
774         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
775                 do {
776                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
777                                                       item_size, &ref_root,
778                                                       &ref_level);
779                         btrfs_warn_in_rcu(fs_info,
780 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
781                                 errstr, swarn.logical,
782                                 rcu_str_deref(dev->name),
783                                 swarn.physical,
784                                 ref_level ? "node" : "leaf",
785                                 ret < 0 ? -1 : ref_level,
786                                 ret < 0 ? -1 : ref_root);
787                 } while (ret != 1);
788                 btrfs_release_path(path);
789         } else {
790                 btrfs_release_path(path);
791                 swarn.path = path;
792                 swarn.dev = dev;
793                 iterate_extent_inodes(fs_info, found_key.objectid,
794                                         extent_item_pos, 1,
795                                         scrub_print_warning_inode, &swarn, false);
796         }
797
798 out:
799         btrfs_free_path(path);
800 }
801
802 static inline void scrub_get_recover(struct scrub_recover *recover)
803 {
804         refcount_inc(&recover->refs);
805 }
806
807 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
808                                      struct scrub_recover *recover)
809 {
810         if (refcount_dec_and_test(&recover->refs)) {
811                 btrfs_bio_counter_dec(fs_info);
812                 btrfs_put_bbio(recover->bbio);
813                 kfree(recover);
814         }
815 }
816
817 /*
818  * scrub_handle_errored_block gets called when either verification of the
819  * pages failed or the bio failed to read, e.g. with EIO. In the latter
820  * case, this function handles all pages in the bio, even though only one
821  * may be bad.
822  * The goal of this function is to repair the errored block by using the
823  * contents of one of the mirrors.
824  */
825 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
826 {
827         struct scrub_ctx *sctx = sblock_to_check->sctx;
828         struct btrfs_device *dev;
829         struct btrfs_fs_info *fs_info;
830         u64 logical;
831         unsigned int failed_mirror_index;
832         unsigned int is_metadata;
833         unsigned int have_csum;
834         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
835         struct scrub_block *sblock_bad;
836         int ret;
837         int mirror_index;
838         int page_num;
839         int success;
840         bool full_stripe_locked;
841         unsigned int nofs_flag;
842         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
843                                       DEFAULT_RATELIMIT_BURST);
844
845         BUG_ON(sblock_to_check->page_count < 1);
846         fs_info = sctx->fs_info;
847         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
848                 /*
849                  * If we find an error in a super block, we just report it;
850                  * super blocks get rewritten with the next transaction
851                  * commit anyway
852                  */
853                 spin_lock(&sctx->stat_lock);
854                 ++sctx->stat.super_errors;
855                 spin_unlock(&sctx->stat_lock);
856                 return 0;
857         }
858         logical = sblock_to_check->pagev[0]->logical;
859         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
860         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
861         is_metadata = !(sblock_to_check->pagev[0]->flags &
862                         BTRFS_EXTENT_FLAG_DATA);
863         have_csum = sblock_to_check->pagev[0]->have_csum;
864         dev = sblock_to_check->pagev[0]->dev;
865
866         /*
867          * We must use GFP_NOFS because the scrub task might be waiting for a
868          * worker task executing this function and in turn a transaction commit
869          * might be waiting for the scrub task to pause (which needs to wait for all
870          * the worker tasks to complete before pausing).
871          * We do allocations in the workers through insert_full_stripe_lock()
872          * and scrub_add_page_to_wr_bio(), which happens down the call chain of
873          * this function.
874          */
875         nofs_flag = memalloc_nofs_save();
876         /*
877          * For RAID5/6, a race can happen with a scrub thread on a different
878          * device. In case of data corruption, the parity and data threads
879          * will both try to recover the data.
880          * The race can lead to a doubly added csum error, or even an
881          * unrecoverable error.
882          */
883         ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
884         if (ret < 0) {
885                 memalloc_nofs_restore(nofs_flag);
886                 spin_lock(&sctx->stat_lock);
887                 if (ret == -ENOMEM)
888                         sctx->stat.malloc_errors++;
889                 sctx->stat.read_errors++;
890                 sctx->stat.uncorrectable_errors++;
891                 spin_unlock(&sctx->stat_lock);
892                 return ret;
893         }
894
895         /*
896          * Read all mirrors one after the other. This includes
897          * re-reading the extent or metadata block that failed (which was
898          * the reason this fixup code was called), another time,
899          * page by page this time, in order to know which pages
900          * caused I/O errors and which ones are good (for all mirrors).
901          * The goal is to handle the situation when more than one
902          * mirror contains I/O errors, but the errors do not
903          * overlap, i.e. the data can be repaired by selecting the
904          * pages from those mirrors without I/O error on the
905          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
906          * would be that mirror #1 has an I/O error on the first page,
907          * the second page is good, and mirror #2 has an I/O error on
908          * the second page, but the first page is good.
909          * Then the first page of the first mirror can be repaired by
910          * taking the first page of the second mirror, and the
911          * second page of the second mirror can be repaired by
912          * copying the contents of the 2nd page of the 1st mirror.
913          * One more note: if the pages of one mirror contain I/O
914          * errors, the checksum cannot be verified. In order to get
915          * the best data for repairing, the first attempt is to find
916          * a mirror without I/O errors and with a validated checksum.
917          * Only if this is not possible are the pages picked from
918          * mirrors with I/O errors without considering the checksum.
919          * If the latter is the case, at the end, the checksum of the
920          * repaired area is verified in order to correctly maintain
921          * the statistics.
922          */
923
924         sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
925                                       sizeof(*sblocks_for_recheck), GFP_KERNEL);
926         if (!sblocks_for_recheck) {
927                 spin_lock(&sctx->stat_lock);
928                 sctx->stat.malloc_errors++;
929                 sctx->stat.read_errors++;
930                 sctx->stat.uncorrectable_errors++;
931                 spin_unlock(&sctx->stat_lock);
932                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
933                 goto out;
934         }
935
936         /* setup the context, map the logical blocks and alloc the pages */
937         ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
938         if (ret) {
939                 spin_lock(&sctx->stat_lock);
940                 sctx->stat.read_errors++;
941                 sctx->stat.uncorrectable_errors++;
942                 spin_unlock(&sctx->stat_lock);
943                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
944                 goto out;
945         }
946         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
947         sblock_bad = sblocks_for_recheck + failed_mirror_index;
948
949         /* build and submit the bios for the failed mirror, check checksums */
950         scrub_recheck_block(fs_info, sblock_bad, 1);
951
952         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
953             sblock_bad->no_io_error_seen) {
954                 /*
955                  * the error disappeared after reading page by page, or
956                  * the area was part of a huge bio and other parts of the
957                  * bio caused I/O errors, or the block layer merged several
958                  * read requests into one and the error is caused by a
959                  * different bio (usually one of the two latter cases is
960                  * the cause)
961                  */
962                 spin_lock(&sctx->stat_lock);
963                 sctx->stat.unverified_errors++;
964                 sblock_to_check->data_corrected = 1;
965                 spin_unlock(&sctx->stat_lock);
966
967                 if (sctx->is_dev_replace)
968                         scrub_write_block_to_dev_replace(sblock_bad);
969                 goto out;
970         }
971
972         if (!sblock_bad->no_io_error_seen) {
973                 spin_lock(&sctx->stat_lock);
974                 sctx->stat.read_errors++;
975                 spin_unlock(&sctx->stat_lock);
976                 if (__ratelimit(&_rs))
977                         scrub_print_warning("i/o error", sblock_to_check);
978                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
979         } else if (sblock_bad->checksum_error) {
980                 spin_lock(&sctx->stat_lock);
981                 sctx->stat.csum_errors++;
982                 spin_unlock(&sctx->stat_lock);
983                 if (__ratelimit(&_rs))
984                         scrub_print_warning("checksum error", sblock_to_check);
985                 btrfs_dev_stat_inc_and_print(dev,
986                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
987         } else if (sblock_bad->header_error) {
988                 spin_lock(&sctx->stat_lock);
989                 sctx->stat.verify_errors++;
990                 spin_unlock(&sctx->stat_lock);
991                 if (__ratelimit(&_rs))
992                         scrub_print_warning("checksum/header error",
993                                             sblock_to_check);
994                 if (sblock_bad->generation_error)
995                         btrfs_dev_stat_inc_and_print(dev,
996                                 BTRFS_DEV_STAT_GENERATION_ERRS);
997                 else
998                         btrfs_dev_stat_inc_and_print(dev,
999                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1000         }
1001
1002         if (sctx->readonly) {
1003                 ASSERT(!sctx->is_dev_replace);
1004                 goto out;
1005         }
1006
1007         /*
1008          * now build and submit the bios for the other mirrors, check
1009          * checksums.
1010          * First try to pick the mirror which is completely without I/O
1011          * errors and also does not have a checksum error.
1012          * If one is found, and if a checksum is present, the full block
1013          * that is known to contain an error is rewritten. Afterwards
1014          * the block is known to be corrected.
1015          * If a mirror is found which is completely correct, and no
1016          * checksum is present, only those pages are rewritten that had
1017          * an I/O error in the block to be repaired, since it cannot be
1018          * determined which copy of the other pages is better (and it
1019          * could happen otherwise that a correct page would be
1020          * overwritten by a bad one).
1021          */
1022         for (mirror_index = 0; ;mirror_index++) {
1023                 struct scrub_block *sblock_other;
1024
1025                 if (mirror_index == failed_mirror_index)
1026                         continue;
1027
1028                 /* raid56's mirror count can be more than BTRFS_MAX_MIRRORS */
1029                 if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1030                         if (mirror_index >= BTRFS_MAX_MIRRORS)
1031                                 break;
1032                         if (!sblocks_for_recheck[mirror_index].page_count)
1033                                 break;
1034
1035                         sblock_other = sblocks_for_recheck + mirror_index;
1036                 } else {
1037                         struct scrub_recover *r = sblock_bad->pagev[0]->recover;
1038                         int max_allowed = r->bbio->num_stripes -
1039                                                 r->bbio->num_tgtdevs;
1040
1041                         if (mirror_index >= max_allowed)
1042                                 break;
1043                         if (!sblocks_for_recheck[1].page_count)
1044                                 break;
1045
1046                         ASSERT(failed_mirror_index == 0);
1047                         sblock_other = sblocks_for_recheck + 1;
1048                         sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
1049                 }
1050
1051                 /* build and submit the bios, check checksums */
1052                 scrub_recheck_block(fs_info, sblock_other, 0);
1053
1054                 if (!sblock_other->header_error &&
1055                     !sblock_other->checksum_error &&
1056                     sblock_other->no_io_error_seen) {
1057                         if (sctx->is_dev_replace) {
1058                                 scrub_write_block_to_dev_replace(sblock_other);
1059                                 goto corrected_error;
1060                         } else {
1061                                 ret = scrub_repair_block_from_good_copy(
1062                                                 sblock_bad, sblock_other);
1063                                 if (!ret)
1064                                         goto corrected_error;
1065                         }
1066                 }
1067         }
1068
1069         if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1070                 goto did_not_correct_error;
1071
1072         /*
1073          * In case of I/O errors in the area that is supposed to be
1074          * repaired, continue by picking good copies of those pages.
1075          * Select the good pages from mirrors to rewrite bad pages from
1076          * the area to fix. Afterwards verify the checksum of the block
1077          * that is supposed to be repaired. This verification step is
1078          * only done for the purpose of statistic counting and for the
1079          * only done for the purpose of statistics counting and for the
1080          * final scrub report on whether errors remain.
1081          * all possible combinations of pages from the different mirrors
1082          * until the checksum verification succeeds. For example, when
1083          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1084          * of mirror #2 is readable but the final checksum test fails,
1085          * then the 2nd page of mirror #3 could be tried, whether now
1086          * then the 2nd page of mirror #3 could be tried, to see whether
1087          * the final checksum then succeeds. But this would be a rare
1088          * avoided that the good copy is overwritten.
1089          * A more useful improvement would be to pick the sectors
1090          * without I/O error based on sector sizes (512 bytes on legacy
1091          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1092          * mirror could be repaired by taking 512 bytes of a different
1093          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1094          * area are unreadable.
1095          */
1096         success = 1;
1097         for (page_num = 0; page_num < sblock_bad->page_count;
1098              page_num++) {
1099                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1100                 struct scrub_block *sblock_other = NULL;
1101
1102                 /* skip no-io-error page in scrub */
1103                 if (!page_bad->io_error && !sctx->is_dev_replace)
1104                         continue;
1105
1106                 if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1107                         /*
1108                          * In case of dev replace, if the raid56 rebuild process
1109                          * didn't work out the correct data, then copy the content
1110                          * of sblock_bad to make sure the target device is identical
1111                          * to the source device, instead of writing the garbage data
1112                          * in the sblock_for_recheck array to the target device.
1113                          */
1114                         sblock_other = NULL;
1115                 } else if (page_bad->io_error) {
1116                         /* try to find no-io-error page in mirrors */
1117                         for (mirror_index = 0;
1118                              mirror_index < BTRFS_MAX_MIRRORS &&
1119                              sblocks_for_recheck[mirror_index].page_count > 0;
1120                              mirror_index++) {
1121                                 if (!sblocks_for_recheck[mirror_index].
1122                                     pagev[page_num]->io_error) {
1123                                         sblock_other = sblocks_for_recheck +
1124                                                        mirror_index;
1125                                         break;
1126                                 }
1127                         }
1128                         if (!sblock_other)
1129                                 success = 0;
1130                 }
1131
1132                 if (sctx->is_dev_replace) {
1133                         /*
1134                          * did not find a mirror to fetch the page
1135                          * from. scrub_write_page_to_dev_replace()
1136                          * handles this case (page->io_error) by
1137                          * filling the block with zeros before
1138                          * submitting the write request
1139                          */
1140                         if (!sblock_other)
1141                                 sblock_other = sblock_bad;
1142
1143                         if (scrub_write_page_to_dev_replace(sblock_other,
1144                                                             page_num) != 0) {
1145                                 atomic64_inc(
1146                                         &fs_info->dev_replace.num_write_errors);
1147                                 success = 0;
1148                         }
1149                 } else if (sblock_other) {
1150                         ret = scrub_repair_page_from_good_copy(sblock_bad,
1151                                                                sblock_other,
1152                                                                page_num, 0);
1153                         if (ret == 0)
1154                                 page_bad->io_error = 0;
1155                         else
1156                                 success = 0;
1157                 }
1158         }
1159
1160         if (success && !sctx->is_dev_replace) {
1161                 if (is_metadata || have_csum) {
1162                         /*
1163                          * need to verify the checksum now that all
1164                          * sectors on disk are repaired (the write
1165                          * request for data to be repaired is on its way).
1166                          * Just be lazy and use scrub_recheck_block()
1167                          * which re-reads the data before the checksum
1168                          * is verified, but most likely the data comes out
1169                          * of the page cache.
1170                          */
1171                         scrub_recheck_block(fs_info, sblock_bad, 1);
1172                         if (!sblock_bad->header_error &&
1173                             !sblock_bad->checksum_error &&
1174                             sblock_bad->no_io_error_seen)
1175                                 goto corrected_error;
1176                         else
1177                                 goto did_not_correct_error;
1178                 } else {
1179 corrected_error:
1180                         spin_lock(&sctx->stat_lock);
1181                         sctx->stat.corrected_errors++;
1182                         sblock_to_check->data_corrected = 1;
1183                         spin_unlock(&sctx->stat_lock);
1184                         btrfs_err_rl_in_rcu(fs_info,
1185                                 "fixed up error at logical %llu on dev %s",
1186                                 logical, rcu_str_deref(dev->name));
1187                 }
1188         } else {
1189 did_not_correct_error:
1190                 spin_lock(&sctx->stat_lock);
1191                 sctx->stat.uncorrectable_errors++;
1192                 spin_unlock(&sctx->stat_lock);
1193                 btrfs_err_rl_in_rcu(fs_info,
1194                         "unable to fixup (regular) error at logical %llu on dev %s",
1195                         logical, rcu_str_deref(dev->name));
1196         }
1197
1198 out:
1199         if (sblocks_for_recheck) {
1200                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1201                      mirror_index++) {
1202                         struct scrub_block *sblock = sblocks_for_recheck +
1203                                                      mirror_index;
1204                         struct scrub_recover *recover;
1205                         int page_index;
1206
1207                         for (page_index = 0; page_index < sblock->page_count;
1208                              page_index++) {
1209                                 sblock->pagev[page_index]->sblock = NULL;
1210                                 recover = sblock->pagev[page_index]->recover;
1211                                 if (recover) {
1212                                         scrub_put_recover(fs_info, recover);
1213                                         sblock->pagev[page_index]->recover =
1214                                                                         NULL;
1215                                 }
1216                                 scrub_page_put(sblock->pagev[page_index]);
1217                         }
1218                 }
1219                 kfree(sblocks_for_recheck);
1220         }
1221
1222         ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1223         memalloc_nofs_restore(nofs_flag);
1224         if (ret < 0)
1225                 return ret;
1226         return 0;
1227 }
1228
1229 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1230 {
1231         if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1232                 return 2;
1233         else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1234                 return 3;
1235         else
1236                 return (int)bbio->num_stripes;
1237 }
1238
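/*
 * Note (added for clarity): for RAID5 a page can be obtained in two ways,
 * the direct read and a rebuild from the remaining data stripes plus the P
 * parity; RAID6 adds a third possibility using the Q parity. For other
 * profiles the number of copies equals the number of stripes returned by
 * the mapping.
 */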
1239 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1240                                                  u64 *raid_map,
1241                                                  u64 mapped_length,
1242                                                  int nstripes, int mirror,
1243                                                  int *stripe_index,
1244                                                  u64 *stripe_offset)
1245 {
1246         int i;
1247
1248         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1249                 /* RAID5/6 */
1250                 for (i = 0; i < nstripes; i++) {
1251                         if (raid_map[i] == RAID6_Q_STRIPE ||
1252                             raid_map[i] == RAID5_P_STRIPE)
1253                                 continue;
1254
1255                         if (logical >= raid_map[i] &&
1256                             logical < raid_map[i] + mapped_length)
1257                                 break;
1258                 }
1259
1260                 *stripe_index = i;
1261                 *stripe_offset = logical - raid_map[i];
1262         } else {
1263                 /* The other RAID type */
1264                 *stripe_index = mirror;
1265                 *stripe_offset = 0;
1266         }
1267 }
1268
1269 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1270                                      struct scrub_block *sblocks_for_recheck)
1271 {
1272         struct scrub_ctx *sctx = original_sblock->sctx;
1273         struct btrfs_fs_info *fs_info = sctx->fs_info;
1274         u64 length = original_sblock->page_count * PAGE_SIZE;
1275         u64 logical = original_sblock->pagev[0]->logical;
1276         u64 generation = original_sblock->pagev[0]->generation;
1277         u64 flags = original_sblock->pagev[0]->flags;
1278         u64 have_csum = original_sblock->pagev[0]->have_csum;
1279         struct scrub_recover *recover;
1280         struct btrfs_bio *bbio;
1281         u64 sublen;
1282         u64 mapped_length;
1283         u64 stripe_offset;
1284         int stripe_index;
1285         int page_index = 0;
1286         int mirror_index;
1287         int nmirrors;
1288         int ret;
1289
1290         /*
1291          * note: the two members refs and outstanding_pages
1292          * are not used (and not set) in the blocks that are used for
1293          * the recheck procedure
1294          */
1295
1296         while (length > 0) {
1297                 sublen = min_t(u64, length, PAGE_SIZE);
1298                 mapped_length = sublen;
1299                 bbio = NULL;
1300
1301                 /*
1302                  * with a length of PAGE_SIZE, each returned stripe
1303                  * represents one mirror
1304                  */
1305                 btrfs_bio_counter_inc_blocked(fs_info);
1306                 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1307                                 logical, &mapped_length, &bbio);
1308                 if (ret || !bbio || mapped_length < sublen) {
1309                         btrfs_put_bbio(bbio);
1310                         btrfs_bio_counter_dec(fs_info);
1311                         return -EIO;
1312                 }
1313
1314                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1315                 if (!recover) {
1316                         btrfs_put_bbio(bbio);
1317                         btrfs_bio_counter_dec(fs_info);
1318                         return -ENOMEM;
1319                 }
1320
1321                 refcount_set(&recover->refs, 1);
1322                 recover->bbio = bbio;
1323                 recover->map_length = mapped_length;
1324
1325                 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1326
1327                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1328
1329                 for (mirror_index = 0; mirror_index < nmirrors;
1330                      mirror_index++) {
1331                         struct scrub_block *sblock;
1332                         struct scrub_page *page;
1333
1334                         sblock = sblocks_for_recheck + mirror_index;
1335                         sblock->sctx = sctx;
1336
1337                         page = kzalloc(sizeof(*page), GFP_NOFS);
1338                         if (!page) {
1339 leave_nomem:
1340                                 spin_lock(&sctx->stat_lock);
1341                                 sctx->stat.malloc_errors++;
1342                                 spin_unlock(&sctx->stat_lock);
1343                                 scrub_put_recover(fs_info, recover);
1344                                 return -ENOMEM;
1345                         }
1346                         scrub_page_get(page);
1347                         sblock->pagev[page_index] = page;
1348                         page->sblock = sblock;
1349                         page->flags = flags;
1350                         page->generation = generation;
1351                         page->logical = logical;
1352                         page->have_csum = have_csum;
1353                         if (have_csum)
1354                                 memcpy(page->csum,
1355                                        original_sblock->pagev[0]->csum,
1356                                        sctx->csum_size);
1357
1358                         scrub_stripe_index_and_offset(logical,
1359                                                       bbio->map_type,
1360                                                       bbio->raid_map,
1361                                                       mapped_length,
1362                                                       bbio->num_stripes -
1363                                                       bbio->num_tgtdevs,
1364                                                       mirror_index,
1365                                                       &stripe_index,
1366                                                       &stripe_offset);
1367                         page->physical = bbio->stripes[stripe_index].physical +
1368                                          stripe_offset;
1369                         page->dev = bbio->stripes[stripe_index].dev;
1370
1371                         BUG_ON(page_index >= original_sblock->page_count);
1372                         page->physical_for_dev_replace =
1373                                 original_sblock->pagev[page_index]->
1374                                 physical_for_dev_replace;
1375                         /* for missing devices, dev->bdev is NULL */
1376                         page->mirror_num = mirror_index + 1;
1377                         sblock->page_count++;
1378                         page->page = alloc_page(GFP_NOFS);
1379                         if (!page->page)
1380                                 goto leave_nomem;
1381
1382                         scrub_get_recover(recover);
1383                         page->recover = recover;
1384                 }
1385                 scrub_put_recover(fs_info, recover);
1386                 length -= sublen;
1387                 logical += sublen;
1388                 page_index++;
1389         }
1390
1391         return 0;
1392 }
1393
1394 static void scrub_bio_wait_endio(struct bio *bio)
1395 {
1396         complete(bio->bi_private);
1397 }
1398
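/*
 * Read one block through the RAID5/6 recovery code: hand the bio to
 * raid56_parity_recover() together with the recorded recover->bbio
 * mapping and wait synchronously for the result.
 */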
1399 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1400                                         struct bio *bio,
1401                                         struct scrub_page *page)
1402 {
1403         DECLARE_COMPLETION_ONSTACK(done);
1404         int ret;
1405         int mirror_num;
1406
1407         bio->bi_iter.bi_sector = page->logical >> 9;
1408         bio->bi_private = &done;
1409         bio->bi_end_io = scrub_bio_wait_endio;
1410
1411         mirror_num = page->sblock->pagev[0]->mirror_num;
1412         ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1413                                     page->recover->map_length,
1414                                     mirror_num, 0);
1415         if (ret)
1416                 return ret;
1417
1418         wait_for_completion_io(&done);
1419         return blk_status_to_errno(bio->bi_status);
1420 }
1421
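/*
 * Recheck a block that sits on a RAID5/6 stripe: all pages are read in a
 * single bio via the recovery path above. On failure (or a missing
 * device) every page of the block is marked with an I/O error.
 */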
1422 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1423                                           struct scrub_block *sblock)
1424 {
1425         struct scrub_page *first_page = sblock->pagev[0];
1426         struct bio *bio;
1427         int page_num;
1428
1429         /* All pages in sblock belong to the same stripe on the same device. */
1430         ASSERT(first_page->dev);
1431         if (!first_page->dev->bdev)
1432                 goto out;
1433
1434         bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
1435         bio_set_dev(bio, first_page->dev->bdev);
1436
1437         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1438                 struct scrub_page *page = sblock->pagev[page_num];
1439
1440                 WARN_ON(!page->page);
1441                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1442         }
1443
1444         if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
1445                 bio_put(bio);
1446                 goto out;
1447         }
1448
1449         bio_put(bio);
1450
1451         scrub_recheck_block_checksum(sblock);
1452
1453         return;
1454 out:
1455         for (page_num = 0; page_num < sblock->page_count; page_num++)
1456                 sblock->pagev[page_num]->io_error = 1;
1457
1458         sblock->no_io_error_seen = 0;
1459 }
1460
1461 /*
1462  * This function checks the on-disk data for checksum errors, header
1463  * errors and read I/O errors. If any I/O error happens, the exact pages
1464  * that hit the error are marked as bad. The goal is to enable scrub
1465  * to take the pages that are not errored from all the mirrors so that
1466  * the pages that are errored in the just handled mirror can be repaired.
1462  * This function checks the on-disk data for checksum errors, header
1463  * errors and read I/O errors. If any I/O error happens, the exact pages
1464  * that hit the error are marked as bad. The goal is to enable scrub
1465  * to take the pages that are not errored from all the mirrors so that
1466  * the pages that are errored in the just handled mirror can be repaired.
1467  */
1468 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1469                                 struct scrub_block *sblock,
1470                                 int retry_failed_mirror)
1471 {
1472         int page_num;
1473
1474         sblock->no_io_error_seen = 1;
1475
1476         /* short cut for raid56 */
1477         if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
1478                 return scrub_recheck_block_on_raid56(fs_info, sblock);
1479
1480         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1481                 struct bio *bio;
1482                 struct scrub_page *page = sblock->pagev[page_num];
1483
1484                 if (page->dev->bdev == NULL) {
1485                         page->io_error = 1;
1486                         sblock->no_io_error_seen = 0;
1487                         continue;
1488                 }
1489
1490                 WARN_ON(!page->page);
1491                 bio = btrfs_io_bio_alloc(1);
1492                 bio_set_dev(bio, page->dev->bdev);
1493
1494                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1495                 bio->bi_iter.bi_sector = page->physical >> 9;
1496                 bio->bi_opf = REQ_OP_READ;
1497
1498                 if (btrfsic_submit_bio_wait(bio)) {
1499                         page->io_error = 1;
1500                         sblock->no_io_error_seen = 0;
1501                 }
1502
1503                 bio_put(bio);
1504         }
1505
1506         if (sblock->no_io_error_seen)
1507                 scrub_recheck_block_checksum(sblock);
1508 }
1509
1510 static inline int scrub_check_fsid(u8 fsid[],
1511                                    struct scrub_page *spage)
1512 {
1513         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1514         int ret;
1515
1516         ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1517         return !ret;
1518 }
1519
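/*
 * Clear the error flags of the block and re-verify its checksum, either
 * as a data extent or as a tree block depending on the extent flags.
 */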
1520 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1521 {
1522         sblock->header_error = 0;
1523         sblock->checksum_error = 0;
1524         sblock->generation_error = 0;
1525
1526         if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1527                 scrub_checksum_data(sblock);
1528         else
1529                 scrub_checksum_tree_block(sblock);
1530 }
1531
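/*
 * Rewrite every page of the bad block on disk with the contents of the
 * corresponding page from the good mirror. Returns 0 on success,
 * otherwise the last error encountered.
 */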
1532 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1533                                              struct scrub_block *sblock_good)
1534 {
1535         int page_num;
1536         int ret = 0;
1537
1538         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1539                 int ret_sub;
1540
1541                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1542                                                            sblock_good,
1543                                                            page_num, 1);
1544                 if (ret_sub)
1545                         ret = ret_sub;
1546         }
1547
1548         return ret;
1549 }
1550
1551 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1552                                             struct scrub_block *sblock_good,
1553                                             int page_num, int force_write)
1554 {
1555         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1556         struct scrub_page *page_good = sblock_good->pagev[page_num];
1557         struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1558
1559         BUG_ON(page_bad->page == NULL);
1560         BUG_ON(page_good->page == NULL);
1561         if (force_write || sblock_bad->header_error ||
1562             sblock_bad->checksum_error || page_bad->io_error) {
1563                 struct bio *bio;
1564                 int ret;
1565
1566                 if (!page_bad->dev->bdev) {
1567                         btrfs_warn_rl(fs_info,
1568                                 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1569                         return -EIO;
1570                 }
1571
1572                 bio = btrfs_io_bio_alloc(1);
1573                 bio_set_dev(bio, page_bad->dev->bdev);
1574                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1575                 bio->bi_opf = REQ_OP_WRITE;
1576
1577                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1578                 if (PAGE_SIZE != ret) {
1579                         bio_put(bio);
1580                         return -EIO;
1581                 }
1582
1583                 if (btrfsic_submit_bio_wait(bio)) {
1584                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1585                                 BTRFS_DEV_STAT_WRITE_ERRS);
1586                         atomic64_inc(&fs_info->dev_replace.num_write_errors);
1587                         bio_put(bio);
1588                         return -EIO;
1589                 }
1590                 bio_put(bio);
1591         }
1592
1593         return 0;
1594 }
1595
1596 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1597 {
1598         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1599         int page_num;
1600
1601         /*
1602          * This block is used for the check of the parity on the source device,
1603          * so the data needn't be written into the destination device.
1604          */
1605         if (sblock->sparity)
1606                 return;
1607
1608         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1609                 int ret;
1610
1611                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1612                 if (ret)
1613                         atomic64_inc(&fs_info->dev_replace.num_write_errors);
1614         }
1615 }
1616
1617 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1618                                            int page_num)
1619 {
1620         struct scrub_page *spage = sblock->pagev[page_num];
1621
1622         BUG_ON(spage->page == NULL);
1623         if (spage->io_error) {
1624                 void *mapped_buffer = kmap_atomic(spage->page);
1625
1626                 clear_page(mapped_buffer);
1627                 flush_dcache_page(spage->page);
1628                 kunmap_atomic(mapped_buffer);
1629         }
1630         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1631 }
1632
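/*
 * Queue a page for writing to the dev-replace target device, allocating
 * a new write bio when there is no current one and submitting the
 * current one first when it is full or when the page is not physically
 * or logically contiguous with it.
 */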
1633 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1634                                     struct scrub_page *spage)
1635 {
1636         struct scrub_bio *sbio;
1637         int ret;
1638
1639         mutex_lock(&sctx->wr_lock);
1640 again:
1641         if (!sctx->wr_curr_bio) {
1642                 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1643                                               GFP_KERNEL);
1644                 if (!sctx->wr_curr_bio) {
1645                         mutex_unlock(&sctx->wr_lock);
1646                         return -ENOMEM;
1647                 }
1648                 sctx->wr_curr_bio->sctx = sctx;
1649                 sctx->wr_curr_bio->page_count = 0;
1650         }
1651         sbio = sctx->wr_curr_bio;
1652         if (sbio->page_count == 0) {
1653                 struct bio *bio;
1654
1655                 sbio->physical = spage->physical_for_dev_replace;
1656                 sbio->logical = spage->logical;
1657                 sbio->dev = sctx->wr_tgtdev;
1658                 bio = sbio->bio;
1659                 if (!bio) {
1660                         bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1661                         sbio->bio = bio;
1662                 }
1663
1664                 bio->bi_private = sbio;
1665                 bio->bi_end_io = scrub_wr_bio_end_io;
1666                 bio_set_dev(bio, sbio->dev->bdev);
1667                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1668                 bio->bi_opf = REQ_OP_WRITE;
1669                 sbio->status = 0;
1670         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1671                    spage->physical_for_dev_replace ||
1672                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1673                    spage->logical) {
1674                 scrub_wr_submit(sctx);
1675                 goto again;
1676         }
1677
1678         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1679         if (ret != PAGE_SIZE) {
1680                 if (sbio->page_count < 1) {
1681                         bio_put(sbio->bio);
1682                         sbio->bio = NULL;
1683                         mutex_unlock(&sctx->wr_lock);
1684                         return -EIO;
1685                 }
1686                 scrub_wr_submit(sctx);
1687                 goto again;
1688         }
1689
1690         sbio->pagev[sbio->page_count] = spage;
1691         scrub_page_get(spage);
1692         sbio->page_count++;
1693         if (sbio->page_count == sctx->pages_per_wr_bio)
1694                 scrub_wr_submit(sctx);
1695         mutex_unlock(&sctx->wr_lock);
1696
1697         return 0;
1698 }
1699
1700 static void scrub_wr_submit(struct scrub_ctx *sctx)
1701 {
1702         struct scrub_bio *sbio;
1703
1704         if (!sctx->wr_curr_bio)
1705                 return;
1706
1707         sbio = sctx->wr_curr_bio;
1708         sctx->wr_curr_bio = NULL;
1709         WARN_ON(!sbio->bio->bi_disk);
1710         scrub_pending_bio_inc(sctx);
1711         /* Process all writes in a single worker thread. Then the block layer
1712          * orders the requests before sending them to the driver, which
1713          * doubled the write performance on spinning disks when measured
1714          * with Linux 3.5. */
1715         btrfsic_submit_bio(sbio->bio);
1716 }
1717
1718 static void scrub_wr_bio_end_io(struct bio *bio)
1719 {
1720         struct scrub_bio *sbio = bio->bi_private;
1721         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1722
1723         sbio->status = bio->bi_status;
1724         sbio->bio = bio;
1725
1726         btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
1727         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1728 }
1729
1730 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1731 {
1732         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1733         struct scrub_ctx *sctx = sbio->sctx;
1734         int i;
1735
1736         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1737         if (sbio->status) {
1738                 struct btrfs_dev_replace *dev_replace =
1739                         &sbio->sctx->fs_info->dev_replace;
1740
1741                 for (i = 0; i < sbio->page_count; i++) {
1742                         struct scrub_page *spage = sbio->pagev[i];
1743
1744                         spage->io_error = 1;
1745                         atomic64_inc(&dev_replace->num_write_errors);
1746                 }
1747         }
1748
1749         for (i = 0; i < sbio->page_count; i++)
1750                 scrub_page_put(sbio->pagev[i]);
1751
1752         bio_put(sbio->bio);
1753         kfree(sbio);
1754         scrub_pending_bio_dec(sctx);
1755 }
1756
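/*
 * Verify the checksum of a completed block: dispatch to the data, tree
 * block or super block verifier based on the extent flags and kick off
 * the error handling when the data or tree block verification fails.
 */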
1757 static int scrub_checksum(struct scrub_block *sblock)
1758 {
1759         u64 flags;
1760         int ret;
1761
1762         /*
1763          * No need to initialize these stats currently,
1764          * because this function only uses the return value
1765          * instead of these stats values.
1766          *
1767          * Todo:
1768          * always use stats
1769          */
1770         sblock->header_error = 0;
1771         sblock->generation_error = 0;
1772         sblock->checksum_error = 0;
1773
1774         WARN_ON(sblock->page_count < 1);
1775         flags = sblock->pagev[0]->flags;
1776         ret = 0;
1777         if (flags & BTRFS_EXTENT_FLAG_DATA)
1778                 ret = scrub_checksum_data(sblock);
1779         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1780                 ret = scrub_checksum_tree_block(sblock);
1781         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1782                 (void)scrub_checksum_super(sblock);
1783         else
1784                 WARN_ON(1);
1785         if (ret)
1786                 scrub_handle_errored_block(sblock);
1787
1788         return ret;
1789 }
1790
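/*
 * Checksum a data block: hash one sector worth of data with the
 * filesystem's checksum shash and compare the result against the
 * checksum recorded for the first page. Sets and returns
 * sblock->checksum_error.
 */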
1791 static int scrub_checksum_data(struct scrub_block *sblock)
1792 {
1793         struct scrub_ctx *sctx = sblock->sctx;
1794         struct btrfs_fs_info *fs_info = sctx->fs_info;
1795         SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1796         u8 csum[BTRFS_CSUM_SIZE];
1797         u8 *on_disk_csum;
1798         struct page *page;
1799         void *buffer;
1800         u64 len;
1801         int index;
1802
1803         BUG_ON(sblock->page_count < 1);
1804         if (!sblock->pagev[0]->have_csum)
1805                 return 0;
1806
1807         shash->tfm = fs_info->csum_shash;
1808         crypto_shash_init(shash);
1809
1810         on_disk_csum = sblock->pagev[0]->csum;
1811         page = sblock->pagev[0]->page;
1812         buffer = kmap_atomic(page);
1813
1814         len = sctx->fs_info->sectorsize;
1815         index = 0;
1816         for (;;) {
1817                 u64 l = min_t(u64, len, PAGE_SIZE);
1818
1819                 crypto_shash_update(shash, buffer, l);
1820                 kunmap_atomic(buffer);
1821                 len -= l;
1822                 if (len == 0)
1823                         break;
1824                 index++;
1825                 BUG_ON(index >= sblock->page_count);
1826                 BUG_ON(!sblock->pagev[index]->page);
1827                 page = sblock->pagev[index]->page;
1828                 buffer = kmap_atomic(page);
1829         }
1830
1831         crypto_shash_final(shash, csum);
1832         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1833                 sblock->checksum_error = 1;
1834
1835         return sblock->checksum_error;
1836 }
1837
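/*
 * Verify a tree block: check bytenr, generation, fsid and chunk tree
 * UUID in the header and recompute the checksum over the whole node.
 * Returns nonzero if a header or checksum error was found.
 */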
1838 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1839 {
1840         struct scrub_ctx *sctx = sblock->sctx;
1841         struct btrfs_header *h;
1842         struct btrfs_fs_info *fs_info = sctx->fs_info;
1843         SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1844         u8 calculated_csum[BTRFS_CSUM_SIZE];
1845         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1846         struct page *page;
1847         void *mapped_buffer;
1848         u64 mapped_size;
1849         void *p;
1850         u64 len;
1851         int index;
1852
1853         shash->tfm = fs_info->csum_shash;
1854         crypto_shash_init(shash);
1855
1856         BUG_ON(sblock->page_count < 1);
1857         page = sblock->pagev[0]->page;
1858         mapped_buffer = kmap_atomic(page);
1859         h = (struct btrfs_header *)mapped_buffer;
1860         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1861
1862         /*
1863          * we don't use the getter functions here, as we
1864          * a) don't have an extent buffer and
1865          * b) the page is already kmapped
1866          */
1867         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1868                 sblock->header_error = 1;
1869
1870         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1871                 sblock->header_error = 1;
1872                 sblock->generation_error = 1;
1873         }
1874
1875         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1876                 sblock->header_error = 1;
1877
1878         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1879                    BTRFS_UUID_SIZE))
1880                 sblock->header_error = 1;
1881
1882         len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
1883         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1884         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1885         index = 0;
1886         for (;;) {
1887                 u64 l = min_t(u64, len, mapped_size);
1888
1889                 crypto_shash_update(shash, p, l);
1890                 kunmap_atomic(mapped_buffer);
1891                 len -= l;
1892                 if (len == 0)
1893                         break;
1894                 index++;
1895                 BUG_ON(index >= sblock->page_count);
1896                 BUG_ON(!sblock->pagev[index]->page);
1897                 page = sblock->pagev[index]->page;
1898                 mapped_buffer = kmap_atomic(page);
1899                 mapped_size = PAGE_SIZE;
1900                 p = mapped_buffer;
1901         }
1902
1903         crypto_shash_final(shash, calculated_csum);
1904         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1905                 sblock->checksum_error = 1;
1906
1907         return sblock->header_error || sblock->checksum_error;
1908 }
1909
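/*
 * Verify a super block copy: check bytenr, generation and fsid and
 * recompute the checksum over the super block (excluding the csum field
 * itself). Errors are only counted and reported, not repaired here.
 */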
1910 static int scrub_checksum_super(struct scrub_block *sblock)
1911 {
1912         struct btrfs_super_block *s;
1913         struct scrub_ctx *sctx = sblock->sctx;
1914         struct btrfs_fs_info *fs_info = sctx->fs_info;
1915         SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1916         u8 calculated_csum[BTRFS_CSUM_SIZE];
1917         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1918         struct page *page;
1919         void *mapped_buffer;
1920         u64 mapped_size;
1921         void *p;
1922         int fail_gen = 0;
1923         int fail_cor = 0;
1924         u64 len;
1925         int index;
1926
1927         shash->tfm = fs_info->csum_shash;
1928         crypto_shash_init(shash);
1929
1930         BUG_ON(sblock->page_count < 1);
1931         page = sblock->pagev[0]->page;
1932         mapped_buffer = kmap_atomic(page);
1933         s = (struct btrfs_super_block *)mapped_buffer;
1934         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1935
1936         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1937                 ++fail_cor;
1938
1939         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1940                 ++fail_gen;
1941
1942         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1943                 ++fail_cor;
1944
1945         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1946         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1947         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1948         index = 0;
1949         for (;;) {
1950                 u64 l = min_t(u64, len, mapped_size);
1951
1952                 crypto_shash_update(shash, p, l);
1953                 kunmap_atomic(mapped_buffer);
1954                 len -= l;
1955                 if (len == 0)
1956                         break;
1957                 index++;
1958                 BUG_ON(index >= sblock->page_count);
1959                 BUG_ON(!sblock->pagev[index]->page);
1960                 page = sblock->pagev[index]->page;
1961                 mapped_buffer = kmap_atomic(page);
1962                 mapped_size = PAGE_SIZE;
1963                 p = mapped_buffer;
1964         }
1965
1966         crypto_shash_final(shash, calculated_csum);
1967         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1968                 ++fail_cor;
1969
1970         if (fail_cor + fail_gen) {
1971                 /*
1972                  * If we find an error in a super block, we just report it.
1973                  * Super blocks will get written out with the next transaction
1974                  * commit anyway.
1975                  */
1976                 spin_lock(&sctx->stat_lock);
1977                 ++sctx->stat.super_errors;
1978                 spin_unlock(&sctx->stat_lock);
1979                 if (fail_cor)
1980                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1981                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1982                 else
1983                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1984                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1985         }
1986
1987         return fail_cor + fail_gen;
1988 }
1989
1990 static void scrub_block_get(struct scrub_block *sblock)
1991 {
1992         refcount_inc(&sblock->refs);
1993 }
1994
1995 static void scrub_block_put(struct scrub_block *sblock)
1996 {
1997         if (refcount_dec_and_test(&sblock->refs)) {
1998                 int i;
1999
2000                 if (sblock->sparity)
2001                         scrub_parity_put(sblock->sparity);
2002
2003                 for (i = 0; i < sblock->page_count; i++)
2004                         scrub_page_put(sblock->pagev[i]);
2005                 kfree(sblock);
2006         }
2007 }
2008
2009 static void scrub_page_get(struct scrub_page *spage)
2010 {
2011         atomic_inc(&spage->refs);
2012 }
2013
2014 static void scrub_page_put(struct scrub_page *spage)
2015 {
2016         if (atomic_dec_and_test(&spage->refs)) {
2017                 if (spage->page)
2018                         __free_page(spage->page);
2019                 kfree(spage);
2020         }
2021 }
2022
2023 static void scrub_submit(struct scrub_ctx *sctx)
2024 {
2025         struct scrub_bio *sbio;
2026
2027         if (sctx->curr == -1)
2028                 return;
2029
2030         sbio = sctx->bios[sctx->curr];
2031         sctx->curr = -1;
2032         scrub_pending_bio_inc(sctx);
2033         btrfsic_submit_bio(sbio->bio);
2034 }
2035
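/*
 * Queue a page for reading: grab a free scrub_bio (waiting for one if
 * necessary) and submit the current bio first when it is full, when the
 * page is not contiguous with it on disk, or when it targets a different
 * device. Each added page takes a reference on its scrub_block.
 */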
2036 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2037                                     struct scrub_page *spage)
2038 {
2039         struct scrub_block *sblock = spage->sblock;
2040         struct scrub_bio *sbio;
2041         int ret;
2042
2043 again:
2044         /*
2045          * grab a fresh bio or wait for one to become available
2046          */
2047         while (sctx->curr == -1) {
2048                 spin_lock(&sctx->list_lock);
2049                 sctx->curr = sctx->first_free;
2050                 if (sctx->curr != -1) {
2051                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2052                         sctx->bios[sctx->curr]->next_free = -1;
2053                         sctx->bios[sctx->curr]->page_count = 0;
2054                         spin_unlock(&sctx->list_lock);
2055                 } else {
2056                         spin_unlock(&sctx->list_lock);
2057                         wait_event(sctx->list_wait, sctx->first_free != -1);
2058                 }
2059         }
2060         sbio = sctx->bios[sctx->curr];
2061         if (sbio->page_count == 0) {
2062                 struct bio *bio;
2063
2064                 sbio->physical = spage->physical;
2065                 sbio->logical = spage->logical;
2066                 sbio->dev = spage->dev;
2067                 bio = sbio->bio;
2068                 if (!bio) {
2069                         bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2070                         sbio->bio = bio;
2071                 }
2072
2073                 bio->bi_private = sbio;
2074                 bio->bi_end_io = scrub_bio_end_io;
2075                 bio_set_dev(bio, sbio->dev->bdev);
2076                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2077                 bio->bi_opf = REQ_OP_READ;
2078                 sbio->status = 0;
2079         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2080                    spage->physical ||
2081                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2082                    spage->logical ||
2083                    sbio->dev != spage->dev) {
2084                 scrub_submit(sctx);
2085                 goto again;
2086         }
2087
2088         sbio->pagev[sbio->page_count] = spage;
2089         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2090         if (ret != PAGE_SIZE) {
2091                 if (sbio->page_count < 1) {
2092                         bio_put(sbio->bio);
2093                         sbio->bio = NULL;
2094                         return -EIO;
2095                 }
2096                 scrub_submit(sctx);
2097                 goto again;
2098         }
2099
2100         scrub_block_get(sblock); /* one for the page added to the bio */
2101         atomic_inc(&sblock->outstanding_pages);
2102         sbio->page_count++;
2103         if (sbio->page_count == sctx->pages_per_rd_bio)
2104                 scrub_submit(sctx);
2105
2106         return 0;
2107 }
2108
2109 static void scrub_missing_raid56_end_io(struct bio *bio)
2110 {
2111         struct scrub_block *sblock = bio->bi_private;
2112         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2113
2114         if (bio->bi_status)
2115                 sblock->no_io_error_seen = 0;
2116
2117         bio_put(bio);
2118
2119         btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2120 }
2121
2122 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2123 {
2124         struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2125         struct scrub_ctx *sctx = sblock->sctx;
2126         struct btrfs_fs_info *fs_info = sctx->fs_info;
2127         u64 logical;
2128         struct btrfs_device *dev;
2129
2130         logical = sblock->pagev[0]->logical;
2131         dev = sblock->pagev[0]->dev;
2132
2133         if (sblock->no_io_error_seen)
2134                 scrub_recheck_block_checksum(sblock);
2135
2136         if (!sblock->no_io_error_seen) {
2137                 spin_lock(&sctx->stat_lock);
2138                 sctx->stat.read_errors++;
2139                 spin_unlock(&sctx->stat_lock);
2140                 btrfs_err_rl_in_rcu(fs_info,
2141                         "IO error rebuilding logical %llu for dev %s",
2142                         logical, rcu_str_deref(dev->name));
2143         } else if (sblock->header_error || sblock->checksum_error) {
2144                 spin_lock(&sctx->stat_lock);
2145                 sctx->stat.uncorrectable_errors++;
2146                 spin_unlock(&sctx->stat_lock);
2147                 btrfs_err_rl_in_rcu(fs_info,
2148                         "failed to rebuild valid logical %llu for dev %s",
2149                         logical, rcu_str_deref(dev->name));
2150         } else {
2151                 scrub_write_block_to_dev_replace(sblock);
2152         }
2153
2154         if (sctx->is_dev_replace && sctx->flush_all_writes) {
2155                 mutex_lock(&sctx->wr_lock);
2156                 scrub_wr_submit(sctx);
2157                 mutex_unlock(&sctx->wr_lock);
2158         }
2159
2160         scrub_block_put(sblock);
2161         scrub_pending_bio_dec(sctx);
2162 }
2163
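/*
 * Scrub a block whose device is missing: hand its pages to a "missing"
 * raid bio so the RAID5/6 code can rebuild the data from the remaining
 * stripes; the result is checked in scrub_missing_raid56_worker().
 */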
2164 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2165 {
2166         struct scrub_ctx *sctx = sblock->sctx;
2167         struct btrfs_fs_info *fs_info = sctx->fs_info;
2168         u64 length = sblock->page_count * PAGE_SIZE;
2169         u64 logical = sblock->pagev[0]->logical;
2170         struct btrfs_bio *bbio = NULL;
2171         struct bio *bio;
2172         struct btrfs_raid_bio *rbio;
2173         int ret;
2174         int i;
2175
2176         btrfs_bio_counter_inc_blocked(fs_info);
2177         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2178                         &length, &bbio);
2179         if (ret || !bbio || !bbio->raid_map)
2180                 goto bbio_out;
2181
2182         if (WARN_ON(!sctx->is_dev_replace ||
2183                     !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2184                 /*
2185                  * We shouldn't be scrubbing a missing device. Even for dev
2186                  * replace, we should only get here for RAID 5/6. We either
2187                  * managed to mount something with no mirrors remaining or
2188                  * there's a bug in scrub_remap_extent()/btrfs_map_block().
2189                  */
2190                 goto bbio_out;
2191         }
2192
2193         bio = btrfs_io_bio_alloc(0);
2194         bio->bi_iter.bi_sector = logical >> 9;
2195         bio->bi_private = sblock;
2196         bio->bi_end_io = scrub_missing_raid56_end_io;
2197
2198         rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2199         if (!rbio)
2200                 goto rbio_out;
2201
2202         for (i = 0; i < sblock->page_count; i++) {
2203                 struct scrub_page *spage = sblock->pagev[i];
2204
2205                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2206         }
2207
2208         btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
2209         scrub_block_get(sblock);
2210         scrub_pending_bio_inc(sctx);
2211         raid56_submit_missing_rbio(rbio);
2212         return;
2213
2214 rbio_out:
2215         bio_put(bio);
2216 bbio_out:
2217         btrfs_bio_counter_dec(fs_info);
2218         btrfs_put_bbio(bbio);
2219         spin_lock(&sctx->stat_lock);
2220         sctx->stat.malloc_errors++;
2221         spin_unlock(&sctx->stat_lock);
2222 }
2223
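/*
 * Create a scrub_block covering [logical, logical + len), one page per
 * PAGE_SIZE, and queue its pages into read bios (or, for a missing
 * RAID5/6 device, into the rebuild path above).
 */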
2224 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2225                        u64 physical, struct btrfs_device *dev, u64 flags,
2226                        u64 gen, int mirror_num, u8 *csum, int force,
2227                        u64 physical_for_dev_replace)
2228 {
2229         struct scrub_block *sblock;
2230         int index;
2231
2232         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2233         if (!sblock) {
2234                 spin_lock(&sctx->stat_lock);
2235                 sctx->stat.malloc_errors++;
2236                 spin_unlock(&sctx->stat_lock);
2237                 return -ENOMEM;
2238         }
2239
2240         /* one ref inside this function, plus one for each page added to
2241          * a bio later on */
2242         refcount_set(&sblock->refs, 1);
2243         sblock->sctx = sctx;
2244         sblock->no_io_error_seen = 1;
2245
2246         for (index = 0; len > 0; index++) {
2247                 struct scrub_page *spage;
2248                 u64 l = min_t(u64, len, PAGE_SIZE);
2249
2250                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2251                 if (!spage) {
2252 leave_nomem:
2253                         spin_lock(&sctx->stat_lock);
2254                         sctx->stat.malloc_errors++;
2255                         spin_unlock(&sctx->stat_lock);
2256                         scrub_block_put(sblock);
2257                         return -ENOMEM;
2258                 }
2259                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2260                 scrub_page_get(spage);
2261                 sblock->pagev[index] = spage;
2262                 spage->sblock = sblock;
2263                 spage->dev = dev;
2264                 spage->flags = flags;
2265                 spage->generation = gen;
2266                 spage->logical = logical;
2267                 spage->physical = physical;
2268                 spage->physical_for_dev_replace = physical_for_dev_replace;
2269                 spage->mirror_num = mirror_num;
2270                 if (csum) {
2271                         spage->have_csum = 1;
2272                         memcpy(spage->csum, csum, sctx->csum_size);
2273                 } else {
2274                         spage->have_csum = 0;
2275                 }
2276                 sblock->page_count++;
2277                 spage->page = alloc_page(GFP_KERNEL);
2278                 if (!spage->page)
2279                         goto leave_nomem;
2280                 len -= l;
2281                 logical += l;
2282                 physical += l;
2283                 physical_for_dev_replace += l;
2284         }
2285
2286         WARN_ON(sblock->page_count == 0);
2287         if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2288                 /*
2289                  * This case should only be hit for RAID 5/6 device replace. See
2290                  * the comment in scrub_missing_raid56_pages() for details.
2291                  */
2292                 scrub_missing_raid56_pages(sblock);
2293         } else {
2294                 for (index = 0; index < sblock->page_count; index++) {
2295                         struct scrub_page *spage = sblock->pagev[index];
2296                         int ret;
2297
2298                         ret = scrub_add_page_to_rd_bio(sctx, spage);
2299                         if (ret) {
2300                                 scrub_block_put(sblock);
2301                                 return ret;
2302                         }
2303                 }
2304
2305                 if (force)
2306                         scrub_submit(sctx);
2307         }
2308
2309         /* last one frees, either here or in bio completion for last page */
2310         scrub_block_put(sblock);
2311         return 0;
2312 }
2313
2314 static void scrub_bio_end_io(struct bio *bio)
2315 {
2316         struct scrub_bio *sbio = bio->bi_private;
2317         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2318
2319         sbio->status = bio->bi_status;
2320         sbio->bio = bio;
2321
2322         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2323 }
2324
2325 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2326 {
2327         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2328         struct scrub_ctx *sctx = sbio->sctx;
2329         int i;
2330
2331         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2332         if (sbio->status) {
2333                 for (i = 0; i < sbio->page_count; i++) {
2334                         struct scrub_page *spage = sbio->pagev[i];
2335
2336                         spage->io_error = 1;
2337                         spage->sblock->no_io_error_seen = 0;
2338                 }
2339         }
2340
2341         /* now complete the scrub_block items that have all pages completed */
2342         for (i = 0; i < sbio->page_count; i++) {
2343                 struct scrub_page *spage = sbio->pagev[i];
2344                 struct scrub_block *sblock = spage->sblock;
2345
2346                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2347                         scrub_block_complete(sblock);
2348                 scrub_block_put(sblock);
2349         }
2350
2351         bio_put(sbio->bio);
2352         sbio->bio = NULL;
2353         spin_lock(&sctx->list_lock);
2354         sbio->next_free = sctx->first_free;
2355         sctx->first_free = sbio->index;
2356         spin_unlock(&sctx->list_lock);
2357
2358         if (sctx->is_dev_replace && sctx->flush_all_writes) {
2359                 mutex_lock(&sctx->wr_lock);
2360                 scrub_wr_submit(sctx);
2361                 mutex_unlock(&sctx->wr_lock);
2362         }
2363
2364         scrub_pending_bio_dec(sctx);
2365 }
2366
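/*
 * Mark the sectors covered by [start, start + len) in the given parity
 * bitmap, wrapping around at the stripe length; if len covers a whole
 * stripe, all bits are set. For example, with a 64K stripe and 4K
 * sectors (16 bits), a start at byte 56K of the stripe and a len of 16K
 * sets bits 14 and 15 and then wraps around to bits 0 and 1.
 */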
2367 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2368                                        unsigned long *bitmap,
2369                                        u64 start, u64 len)
2370 {
2371         u64 offset;
2372         u64 nsectors64;
2373         u32 nsectors;
2374         int sectorsize = sparity->sctx->fs_info->sectorsize;
2375
2376         if (len >= sparity->stripe_len) {
2377                 bitmap_set(bitmap, 0, sparity->nsectors);
2378                 return;
2379         }
2380
2381         start -= sparity->logic_start;
2382         start = div64_u64_rem(start, sparity->stripe_len, &offset);
2383         offset = div_u64(offset, sectorsize);
2384         nsectors64 = div_u64(len, sectorsize);
2385
2386         ASSERT(nsectors64 < UINT_MAX);
2387         nsectors = (u32)nsectors64;
2388
2389         if (offset + nsectors <= sparity->nsectors) {
2390                 bitmap_set(bitmap, offset, nsectors);
2391                 return;
2392         }
2393
2394         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2395         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2396 }
2397
2398 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2399                                                    u64 start, u64 len)
2400 {
2401         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2402 }
2403
2404 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2405                                                   u64 start, u64 len)
2406 {
2407         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2408 }
2409
2410 static void scrub_block_complete(struct scrub_block *sblock)
2411 {
2412         int corrupted = 0;
2413
2414         if (!sblock->no_io_error_seen) {
2415                 corrupted = 1;
2416                 scrub_handle_errored_block(sblock);
2417         } else {
2418                 /*
2419                  * In the dev replace case: if the block has a checksum error,
2420                  * it is written via the repair mechanism, otherwise it is
2421                  * written out here.
2422                  */
2423                 corrupted = scrub_checksum(sblock);
2424                 if (!corrupted && sblock->sctx->is_dev_replace)
2425                         scrub_write_block_to_dev_replace(sblock);
2426         }
2427
2428         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2429                 u64 start = sblock->pagev[0]->logical;
2430                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2431                           PAGE_SIZE;
2432
2433                 scrub_parity_mark_sectors_error(sblock->sparity,
2434                                                 start, end - start);
2435         }
2436 }
2437
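/*
 * Look up the checksum for @logical in the per-context csum list. Entries
 * that end before @logical are dropped; if a matching btrfs_ordered_sum
 * is found, the per-sector checksum is copied into @csum and 1 is
 * returned, otherwise 0.
 */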
2438 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2439 {
2440         struct btrfs_ordered_sum *sum = NULL;
2441         unsigned long index;
2442         unsigned long num_sectors;
2443
2444         while (!list_empty(&sctx->csum_list)) {
2445                 sum = list_first_entry(&sctx->csum_list,
2446                                        struct btrfs_ordered_sum, list);
2447                 if (sum->bytenr > logical)
2448                         return 0;
2449                 if (sum->bytenr + sum->len > logical)
2450                         break;
2451
2452                 ++sctx->stat.csum_discards;
2453                 list_del(&sum->list);
2454                 kfree(sum);
2455                 sum = NULL;
2456         }
2457         if (!sum)
2458                 return 0;
2459
2460         index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2461         ASSERT(index < UINT_MAX);
2462
2463         num_sectors = sum->len / sctx->fs_info->sectorsize;
2464         memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
2465         if (index == num_sectors - 1) {
2466                 list_del(&sum->list);
2467                 kfree(sum);
2468         }
2469         return 1;
2470 }
2471
2472 /* scrub extent tries to collect up to 64 kB for each bio */
2473 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2474                         u64 logical, u64 len,
2475                         u64 physical, struct btrfs_device *dev, u64 flags,
2476                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2477 {
2478         int ret;
2479         u8 csum[BTRFS_CSUM_SIZE];
2480         u32 blocksize;
2481
2482         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2483                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2484                         blocksize = map->stripe_len;
2485                 else
2486                         blocksize = sctx->fs_info->sectorsize;
2487                 spin_lock(&sctx->stat_lock);
2488                 sctx->stat.data_extents_scrubbed++;
2489                 sctx->stat.data_bytes_scrubbed += len;
2490                 spin_unlock(&sctx->stat_lock);
2491         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2492                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2493                         blocksize = map->stripe_len;
2494                 else
2495                         blocksize = sctx->fs_info->nodesize;
2496                 spin_lock(&sctx->stat_lock);
2497                 sctx->stat.tree_extents_scrubbed++;
2498                 sctx->stat.tree_bytes_scrubbed += len;
2499                 spin_unlock(&sctx->stat_lock);
2500         } else {
2501                 blocksize = sctx->fs_info->sectorsize;
2502                 WARN_ON(1);
2503         }
2504
2505         while (len) {
2506                 u64 l = min_t(u64, len, blocksize);
2507                 int have_csum = 0;
2508
2509                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2510                         /* push csums to sbio */
2511                         have_csum = scrub_find_csum(sctx, logical, csum);
2512                         if (have_csum == 0)
2513                                 ++sctx->stat.no_csum;
2514                 }
2515                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2516                                   mirror_num, have_csum ? csum : NULL, 0,
2517                                   physical_for_dev_replace);
2518                 if (ret)
2519                         return ret;
2520                 len -= l;
2521                 logical += l;
2522                 physical += l;
2523                 physical_for_dev_replace += l;
2524         }
2525         return 0;
2526 }
2527
2528 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2529                                   u64 logical, u64 len,
2530                                   u64 physical, struct btrfs_device *dev,
2531                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2532 {
2533         struct scrub_ctx *sctx = sparity->sctx;
2534         struct scrub_block *sblock;
2535         int index;
2536
2537         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2538         if (!sblock) {
2539                 spin_lock(&sctx->stat_lock);
2540                 sctx->stat.malloc_errors++;
2541                 spin_unlock(&sctx->stat_lock);
2542                 return -ENOMEM;
2543         }
2544
2545         /* one ref inside this function, plus one for each page added to
2546          * a bio later on */
2547         refcount_set(&sblock->refs, 1);
2548         sblock->sctx = sctx;
2549         sblock->no_io_error_seen = 1;
2550         sblock->sparity = sparity;
2551         scrub_parity_get(sparity);
2552
2553         for (index = 0; len > 0; index++) {
2554                 struct scrub_page *spage;
2555                 u64 l = min_t(u64, len, PAGE_SIZE);
2556
2557                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2558                 if (!spage) {
2559 leave_nomem:
2560                         spin_lock(&sctx->stat_lock);
2561                         sctx->stat.malloc_errors++;
2562                         spin_unlock(&sctx->stat_lock);
2563                         scrub_block_put(sblock);
2564                         return -ENOMEM;
2565                 }
2566                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2567                 /* For scrub block */
2568                 scrub_page_get(spage);
2569                 sblock->pagev[index] = spage;
2570                 /* For scrub parity */
2571                 scrub_page_get(spage);
2572                 list_add_tail(&spage->list, &sparity->spages);
2573                 spage->sblock = sblock;
2574                 spage->dev = dev;
2575                 spage->flags = flags;
2576                 spage->generation = gen;
2577                 spage->logical = logical;
2578                 spage->physical = physical;
2579                 spage->mirror_num = mirror_num;
2580                 if (csum) {
2581                         spage->have_csum = 1;
2582                         memcpy(spage->csum, csum, sctx->csum_size);
2583                 } else {
2584                         spage->have_csum = 0;
2585                 }
2586                 sblock->page_count++;
2587                 spage->page = alloc_page(GFP_KERNEL);
2588                 if (!spage->page)
2589                         goto leave_nomem;
2590                 len -= l;
2591                 logical += l;
2592                 physical += l;
2593         }
2594
2595         WARN_ON(sblock->page_count == 0);
2596         for (index = 0; index < sblock->page_count; index++) {
2597                 struct scrub_page *spage = sblock->pagev[index];
2598                 int ret;
2599
2600                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2601                 if (ret) {
2602                         scrub_block_put(sblock);
2603                         return ret;
2604                 }
2605         }
2606
2607         /* last one frees, either here or in bio completion for last page */
2608         scrub_block_put(sblock);
2609         return 0;
2610 }
2611
2612 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2613                                    u64 logical, u64 len,
2614                                    u64 physical, struct btrfs_device *dev,
2615                                    u64 flags, u64 gen, int mirror_num)
2616 {
2617         struct scrub_ctx *sctx = sparity->sctx;
2618         int ret;
2619         u8 csum[BTRFS_CSUM_SIZE];
2620         u32 blocksize;
2621
2622         if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2623                 scrub_parity_mark_sectors_error(sparity, logical, len);
2624                 return 0;
2625         }
2626
2627         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2628                 blocksize = sparity->stripe_len;
2629         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2630                 blocksize = sparity->stripe_len;
2631         } else {
2632                 blocksize = sctx->fs_info->sectorsize;
2633                 WARN_ON(1);
2634         }
2635
2636         while (len) {
2637                 u64 l = min_t(u64, len, blocksize);
2638                 int have_csum = 0;
2639
2640                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2641                         /* push csums to sbio */
2642                         have_csum = scrub_find_csum(sctx, logical, csum);
2643                         if (have_csum == 0)
2644                                 goto skip;
2645                 }
2646                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2647                                              flags, gen, mirror_num,
2648                                              have_csum ? csum : NULL);
2649                 if (ret)
2650                         return ret;
2651 skip:
2652                 len -= l;
2653                 logical += l;
2654                 physical += l;
2655         }
2656         return 0;
2657 }
2658
2659 /*
2660  * Given a physical address, this will calculate its
2661  * logical offset. If this is a parity stripe, it will return
2662  * the leftmost data stripe's logical offset.
2663  *
2664  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2665  */
2666 static int get_raid56_logic_offset(u64 physical, int num,
2667                                    struct map_lookup *map, u64 *offset,
2668                                    u64 *stripe_start)
2669 {
2670         int i;
2671         int j = 0;
2672         u64 stripe_nr;
2673         u64 last_offset;
2674         u32 stripe_index;
2675         u32 rot;
2676         const int data_stripes = nr_data_stripes(map);
2677
2678         last_offset = (physical - map->stripes[num].physical) * data_stripes;
2679         if (stripe_start)
2680                 *stripe_start = last_offset;
2681
2682         *offset = last_offset;
2683         for (i = 0; i < data_stripes; i++) {
2684                 *offset = last_offset + i * map->stripe_len;
2685
2686                 stripe_nr = div64_u64(*offset, map->stripe_len);
2687                 stripe_nr = div_u64(stripe_nr, data_stripes);
2688
2689                 /* Work out the disk rotation on this stripe-set */
2690                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2691                 /* Calculate which stripe this data is located on */
2692                 rot += i;
2693                 stripe_index = rot % map->num_stripes;
2694                 if (stripe_index == num)
2695                         return 0;
2696                 if (stripe_index < num)
2697                         j++;
2698         }
2699         *offset = last_offset + j * map->stripe_len;
2700         return 1;
2701 }
2702
2703 static void scrub_free_parity(struct scrub_parity *sparity)
2704 {
2705         struct scrub_ctx *sctx = sparity->sctx;
2706         struct scrub_page *curr, *next;
2707         int nbits;
2708
2709         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2710         if (nbits) {
2711                 spin_lock(&sctx->stat_lock);
2712                 sctx->stat.read_errors += nbits;
2713                 sctx->stat.uncorrectable_errors += nbits;
2714                 spin_unlock(&sctx->stat_lock);
2715         }
2716
2717         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2718                 list_del_init(&curr->list);
2719                 scrub_page_put(curr);
2720         }
2721
2722         kfree(sparity);
2723 }
2724
2725 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2726 {
2727         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2728                                                     work);
2729         struct scrub_ctx *sctx = sparity->sctx;
2730
2731         scrub_free_parity(sparity);
2732         scrub_pending_bio_dec(sctx);
2733 }
2734
2735 static void scrub_parity_bio_endio(struct bio *bio)
2736 {
2737         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2738         struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2739
2740         if (bio->bi_status)
2741                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2742                           sparity->nsectors);
2743
2744         bio_put(bio);
2745
2746         btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
2747                         NULL);
2748         btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2749 }
2750
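/*
 * Called when the last reference to a scrub_parity is dropped: submit a
 * scrub rbio that checks (and repairs if needed) the parity for the
 * sectors still set in dbitmap; sectors already recorded as errors in
 * ebitmap are excluded.
 */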
2751 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2752 {
2753         struct scrub_ctx *sctx = sparity->sctx;
2754         struct btrfs_fs_info *fs_info = sctx->fs_info;
2755         struct bio *bio;
2756         struct btrfs_raid_bio *rbio;
2757         struct btrfs_bio *bbio = NULL;
2758         u64 length;
2759         int ret;
2760
2761         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2762                            sparity->nsectors))
2763                 goto out;
2764
2765         length = sparity->logic_end - sparity->logic_start;
2766
2767         btrfs_bio_counter_inc_blocked(fs_info);
2768         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2769                                &length, &bbio);
2770         if (ret || !bbio || !bbio->raid_map)
2771                 goto bbio_out;
2772
2773         bio = btrfs_io_bio_alloc(0);
2774         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2775         bio->bi_private = sparity;
2776         bio->bi_end_io = scrub_parity_bio_endio;
2777
2778         rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
2779                                               length, sparity->scrub_dev,
2780                                               sparity->dbitmap,
2781                                               sparity->nsectors);
2782         if (!rbio)
2783                 goto rbio_out;
2784
2785         scrub_pending_bio_inc(sctx);
2786         raid56_parity_submit_scrub_rbio(rbio);
2787         return;
2788
2789 rbio_out:
2790         bio_put(bio);
2791 bbio_out:
2792         btrfs_bio_counter_dec(fs_info);
2793         btrfs_put_bbio(bbio);
2794         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2795                   sparity->nsectors);
2796         spin_lock(&sctx->stat_lock);
2797         sctx->stat.malloc_errors++;
2798         spin_unlock(&sctx->stat_lock);
2799 out:
2800         scrub_free_parity(sparity);
2801 }
2802
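/*
 * Size in bytes of one of the two per-stripe sector bitmaps (dbitmap and
 * ebitmap).  For example, with the usual 64KiB stripe_len and 4KiB sectors,
 * nsectors is 16, which rounds up to a single unsigned long (8 bytes on a
 * 64-bit kernel).
 */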
2803 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2804 {
2805         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2806 }
2807
2808 static void scrub_parity_get(struct scrub_parity *sparity)
2809 {
2810         refcount_inc(&sparity->refs);
2811 }
2812
2813 static void scrub_parity_put(struct scrub_parity *sparity)
2814 {
2815         if (!refcount_dec_and_test(&sparity->refs))
2816                 return;
2817
2818         scrub_parity_check_and_repair(sparity);
2819 }
2820
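/*
 * Scrub one full RAID5/6 stripe in the range [logic_start, logic_end):
 * walk the extent tree, mark every data sector of the stripe in dbitmap,
 * read and verify those sectors, and finally let the last reference drop
 * in scrub_parity_put() trigger the parity check and repair.
 */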
2821 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2822                                                   struct map_lookup *map,
2823                                                   struct btrfs_device *sdev,
2824                                                   struct btrfs_path *path,
2825                                                   u64 logic_start,
2826                                                   u64 logic_end)
2827 {
2828         struct btrfs_fs_info *fs_info = sctx->fs_info;
2829         struct btrfs_root *root = fs_info->extent_root;
2830         struct btrfs_root *csum_root = fs_info->csum_root;
2831         struct btrfs_extent_item *extent;
2832         struct btrfs_bio *bbio = NULL;
2833         u64 flags;
2834         int ret;
2835         int slot;
2836         struct extent_buffer *l;
2837         struct btrfs_key key;
2838         u64 generation;
2839         u64 extent_logical;
2840         u64 extent_physical;
2841         u64 extent_len;
2842         u64 mapped_length;
2843         struct btrfs_device *extent_dev;
2844         struct scrub_parity *sparity;
2845         int nsectors;
2846         int bitmap_len;
2847         int extent_mirror_num;
2848         int stop_loop = 0;
2849
2850         nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
2851         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2852         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2853                           GFP_NOFS);
2854         if (!sparity) {
2855                 spin_lock(&sctx->stat_lock);
2856                 sctx->stat.malloc_errors++;
2857                 spin_unlock(&sctx->stat_lock);
2858                 return -ENOMEM;
2859         }
2860
2861         sparity->stripe_len = map->stripe_len;
2862         sparity->nsectors = nsectors;
2863         sparity->sctx = sctx;
2864         sparity->scrub_dev = sdev;
2865         sparity->logic_start = logic_start;
2866         sparity->logic_end = logic_end;
2867         refcount_set(&sparity->refs, 1);
2868         INIT_LIST_HEAD(&sparity->spages);
2869         sparity->dbitmap = sparity->bitmap;
2870         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2871
2872         ret = 0;
2873         while (logic_start < logic_end) {
2874                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2875                         key.type = BTRFS_METADATA_ITEM_KEY;
2876                 else
2877                         key.type = BTRFS_EXTENT_ITEM_KEY;
2878                 key.objectid = logic_start;
2879                 key.offset = (u64)-1;
2880
2881                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2882                 if (ret < 0)
2883                         goto out;
2884
2885                 if (ret > 0) {
2886                         ret = btrfs_previous_extent_item(root, path, 0);
2887                         if (ret < 0)
2888                                 goto out;
2889                         if (ret > 0) {
2890                                 btrfs_release_path(path);
2891                                 ret = btrfs_search_slot(NULL, root, &key,
2892                                                         path, 0, 0);
2893                                 if (ret < 0)
2894                                         goto out;
2895                         }
2896                 }
2897
2898                 stop_loop = 0;
2899                 while (1) {
2900                         u64 bytes;
2901
2902                         l = path->nodes[0];
2903                         slot = path->slots[0];
2904                         if (slot >= btrfs_header_nritems(l)) {
2905                                 ret = btrfs_next_leaf(root, path);
2906                                 if (ret == 0)
2907                                         continue;
2908                                 if (ret < 0)
2909                                         goto out;
2910
2911                                 stop_loop = 1;
2912                                 break;
2913                         }
2914                         btrfs_item_key_to_cpu(l, &key, slot);
2915
2916                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2917                             key.type != BTRFS_METADATA_ITEM_KEY)
2918                                 goto next;
2919
2920                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2921                                 bytes = fs_info->nodesize;
2922                         else
2923                                 bytes = key.offset;
2924
2925                         if (key.objectid + bytes <= logic_start)
2926                                 goto next;
2927
2928                         if (key.objectid >= logic_end) {
2929                                 stop_loop = 1;
2930                                 break;
2931                         }
2932
2933                         while (key.objectid >= logic_start + map->stripe_len)
2934                                 logic_start += map->stripe_len;
2935
2936                         extent = btrfs_item_ptr(l, slot,
2937                                                 struct btrfs_extent_item);
2938                         flags = btrfs_extent_flags(l, extent);
2939                         generation = btrfs_extent_generation(l, extent);
2940
2941                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2942                             (key.objectid < logic_start ||
2943                              key.objectid + bytes >
2944                              logic_start + map->stripe_len)) {
2945                                 btrfs_err(fs_info,
2946                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2947                                           key.objectid, logic_start);
2948                                 spin_lock(&sctx->stat_lock);
2949                                 sctx->stat.uncorrectable_errors++;
2950                                 spin_unlock(&sctx->stat_lock);
2951                                 goto next;
2952                         }
2953 again:
2954                         extent_logical = key.objectid;
2955                         extent_len = bytes;
2956
2957                         if (extent_logical < logic_start) {
2958                                 extent_len -= logic_start - extent_logical;
2959                                 extent_logical = logic_start;
2960                         }
2961
2962                         if (extent_logical + extent_len >
2963                             logic_start + map->stripe_len)
2964                                 extent_len = logic_start + map->stripe_len -
2965                                              extent_logical;
2966
2967                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2968                                                        extent_len);
2969
2970                         mapped_length = extent_len;
2971                         bbio = NULL;
2972                         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2973                                         extent_logical, &mapped_length, &bbio,
2974                                         0);
2975                         if (!ret) {
2976                                 if (!bbio || mapped_length < extent_len)
2977                                         ret = -EIO;
2978                         }
2979                         if (ret) {
2980                                 btrfs_put_bbio(bbio);
2981                                 goto out;
2982                         }
2983                         extent_physical = bbio->stripes[0].physical;
2984                         extent_mirror_num = bbio->mirror_num;
2985                         extent_dev = bbio->stripes[0].dev;
2986                         btrfs_put_bbio(bbio);
2987
2988                         ret = btrfs_lookup_csums_range(csum_root,
2989                                                 extent_logical,
2990                                                 extent_logical + extent_len - 1,
2991                                                 &sctx->csum_list, 1);
2992                         if (ret)
2993                                 goto out;
2994
2995                         ret = scrub_extent_for_parity(sparity, extent_logical,
2996                                                       extent_len,
2997                                                       extent_physical,
2998                                                       extent_dev, flags,
2999                                                       generation,
3000                                                       extent_mirror_num);
3001
3002                         scrub_free_csums(sctx);
3003
3004                         if (ret)
3005                                 goto out;
3006
3007                         if (extent_logical + extent_len <
3008                             key.objectid + bytes) {
3009                                 logic_start += map->stripe_len;
3010
3011                                 if (logic_start >= logic_end) {
3012                                         stop_loop = 1;
3013                                         break;
3014                                 }
3015
3016                                 if (logic_start < key.objectid + bytes) {
3017                                         cond_resched();
3018                                         goto again;
3019                                 }
3020                         }
3021 next:
3022                         path->slots[0]++;
3023                 }
3024
3025                 btrfs_release_path(path);
3026
3027                 if (stop_loop)
3028                         break;
3029
3030                 logic_start += map->stripe_len;
3031         }
3032 out:
3033         if (ret < 0)
3034                 scrub_parity_mark_sectors_error(sparity, logic_start,
3035                                                 logic_end - logic_start);
3036         scrub_parity_put(sparity);
3037         scrub_submit(sctx);
3038         mutex_lock(&sctx->wr_lock);
3039         scrub_wr_submit(sctx);
3040         mutex_unlock(&sctx->wr_lock);
3041
3042         btrfs_release_path(path);
3043         return ret < 0 ? ret : 0;
3044 }
3045
3046 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3047                                            struct map_lookup *map,
3048                                            struct btrfs_device *scrub_dev,
3049                                            int num, u64 base, u64 length)
3050 {
3051         struct btrfs_path *path, *ppath;
3052         struct btrfs_fs_info *fs_info = sctx->fs_info;
3053         struct btrfs_root *root = fs_info->extent_root;
3054         struct btrfs_root *csum_root = fs_info->csum_root;
3055         struct btrfs_extent_item *extent;
3056         struct blk_plug plug;
3057         u64 flags;
3058         int ret;
3059         int slot;
3060         u64 nstripes;
3061         struct extent_buffer *l;
3062         u64 physical;
3063         u64 logical;
3064         u64 logic_end;
3065         u64 physical_end;
3066         u64 generation;
3067         int mirror_num;
3068         struct reada_control *reada1;
3069         struct reada_control *reada2;
3070         struct btrfs_key key;
3071         struct btrfs_key key_end;
3072         u64 increment = map->stripe_len;
3073         u64 offset;
3074         u64 extent_logical;
3075         u64 extent_physical;
3076         u64 extent_len;
3077         u64 stripe_logical;
3078         u64 stripe_end;
3079         struct btrfs_device *extent_dev;
3080         int extent_mirror_num;
3081         int stop_loop = 0;
3082
3083         physical = map->stripes[num].physical;
3084         offset = 0;
3085         nstripes = div64_u64(length, map->stripe_len);
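        /*
         * Work out, per RAID profile, where this device's stripes start
         * within the chunk (offset), how far apart consecutive stripes on
         * this device are in logical address space (increment), and which
         * copy this device holds (mirror_num).  As an illustrative example,
         * for RAID0 over 4 devices with a 64KiB stripe_len, device num = 2
         * gets offset = 128KiB and increment = 256KiB.
         */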
3086         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3087                 offset = map->stripe_len * num;
3088                 increment = map->stripe_len * map->num_stripes;
3089                 mirror_num = 1;
3090         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3091                 int factor = map->num_stripes / map->sub_stripes;
3092                 offset = map->stripe_len * (num / map->sub_stripes);
3093                 increment = map->stripe_len * factor;
3094                 mirror_num = num % map->sub_stripes + 1;
3095         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
3096                 increment = map->stripe_len;
3097                 mirror_num = num % map->num_stripes + 1;
3098         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3099                 increment = map->stripe_len;
3100                 mirror_num = num % map->num_stripes + 1;
3101         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3102                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3103                 increment = map->stripe_len * nr_data_stripes(map);
3104                 mirror_num = 1;
3105         } else {
3106                 increment = map->stripe_len;
3107                 mirror_num = 1;
3108         }
3109
3110         path = btrfs_alloc_path();
3111         if (!path)
3112                 return -ENOMEM;
3113
3114         ppath = btrfs_alloc_path();
3115         if (!ppath) {
3116                 btrfs_free_path(path);
3117                 return -ENOMEM;
3118         }
3119
3120         /*
3121          * Work on the commit root. The related disk blocks are static as
3122          * long as COW is applied. This means it is safe to rewrite
3123          * them to repair disk errors without any race conditions.
3124          */
3125         path->search_commit_root = 1;
3126         path->skip_locking = 1;
3127
3128         ppath->search_commit_root = 1;
3129         ppath->skip_locking = 1;
3130         /*
3131          * Trigger the readahead for the extent tree and the csum tree and
3132          * wait for completion. During readahead, the scrub is officially
3133          * paused so that it does not hold off transaction commits.
3134          */
3135         logical = base + offset;
3136         physical_end = physical + nstripes * map->stripe_len;
3137         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3138                 get_raid56_logic_offset(physical_end, num,
3139                                         map, &logic_end, NULL);
3140                 logic_end += base;
3141         } else {
3142                 logic_end = logical + increment * nstripes;
3143         }
3144         wait_event(sctx->list_wait,
3145                    atomic_read(&sctx->bios_in_flight) == 0);
3146         scrub_blocked_if_needed(fs_info);
3147
3148         /* FIXME it might be better to start readahead at commit root */
3149         key.objectid = logical;
3150         key.type = BTRFS_EXTENT_ITEM_KEY;
3151         key.offset = (u64)0;
3152         key_end.objectid = logic_end;
3153         key_end.type = BTRFS_METADATA_ITEM_KEY;
3154         key_end.offset = (u64)-1;
3155         reada1 = btrfs_reada_add(root, &key, &key_end);
3156
3157         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3158         key.type = BTRFS_EXTENT_CSUM_KEY;
3159         key.offset = logical;
3160         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3161         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3162         key_end.offset = logic_end;
3163         reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3164
3165         if (!IS_ERR(reada1))
3166                 btrfs_reada_wait(reada1);
3167         if (!IS_ERR(reada2))
3168                 btrfs_reada_wait(reada2);
3169
3170
3171         /*
3172          * Collect all data csums for the stripe to avoid seeking during
3173          * the scrub. This might currently (with crc32) add up to about 1MB.
3174          */
3175         blk_start_plug(&plug);
3176
3177         /*
3178          * now find all extents for each stripe and scrub them
3179          */
3180         ret = 0;
3181         while (physical < physical_end) {
3182                 /*
3183                  * canceled?
3184                  */
3185                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3186                     atomic_read(&sctx->cancel_req)) {
3187                         ret = -ECANCELED;
3188                         goto out;
3189                 }
3190                 /*
3191                  * check to see if we have to pause
3192                  */
3193                 if (atomic_read(&fs_info->scrub_pause_req)) {
3194                         /* push queued extents */
3195                         sctx->flush_all_writes = true;
3196                         scrub_submit(sctx);
3197                         mutex_lock(&sctx->wr_lock);
3198                         scrub_wr_submit(sctx);
3199                         mutex_unlock(&sctx->wr_lock);
3200                         wait_event(sctx->list_wait,
3201                                    atomic_read(&sctx->bios_in_flight) == 0);
3202                         sctx->flush_all_writes = false;
3203                         scrub_blocked_if_needed(fs_info);
3204                 }
3205
3206                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3207                         ret = get_raid56_logic_offset(physical, num, map,
3208                                                       &logical,
3209                                                       &stripe_logical);
3210                         logical += base;
3211                         if (ret) {
3212                                 /* it is a parity stripe */
3213                                 stripe_logical += base;
3214                                 stripe_end = stripe_logical + increment;
3215                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3216                                                           ppath, stripe_logical,
3217                                                           stripe_end);
3218                                 if (ret)
3219                                         goto out;
3220                                 goto skip;
3221                         }
3222                 }
3223
3224                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3225                         key.type = BTRFS_METADATA_ITEM_KEY;
3226                 else
3227                         key.type = BTRFS_EXTENT_ITEM_KEY;
3228                 key.objectid = logical;
3229                 key.offset = (u64)-1;
3230
3231                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3232                 if (ret < 0)
3233                         goto out;
3234
3235                 if (ret > 0) {
3236                         ret = btrfs_previous_extent_item(root, path, 0);
3237                         if (ret < 0)
3238                                 goto out;
3239                         if (ret > 0) {
3240                                 /* there's no smaller item, so stick with the
3241                                  * larger one */
3242                                 btrfs_release_path(path);
3243                                 ret = btrfs_search_slot(NULL, root, &key,
3244                                                         path, 0, 0);
3245                                 if (ret < 0)
3246                                         goto out;
3247                         }
3248                 }
3249
3250                 stop_loop = 0;
3251                 while (1) {
3252                         u64 bytes;
3253
3254                         l = path->nodes[0];
3255                         slot = path->slots[0];
3256                         if (slot >= btrfs_header_nritems(l)) {
3257                                 ret = btrfs_next_leaf(root, path);
3258                                 if (ret == 0)
3259                                         continue;
3260                                 if (ret < 0)
3261                                         goto out;
3262
3263                                 stop_loop = 1;
3264                                 break;
3265                         }
3266                         btrfs_item_key_to_cpu(l, &key, slot);
3267
3268                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3269                             key.type != BTRFS_METADATA_ITEM_KEY)
3270                                 goto next;
3271
3272                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3273                                 bytes = fs_info->nodesize;
3274                         else
3275                                 bytes = key.offset;
3276
3277                         if (key.objectid + bytes <= logical)
3278                                 goto next;
3279
3280                         if (key.objectid >= logical + map->stripe_len) {
3281                                 /* out of this device extent */
3282                                 if (key.objectid >= logic_end)
3283                                         stop_loop = 1;
3284                                 break;
3285                         }
3286
3287                         extent = btrfs_item_ptr(l, slot,
3288                                                 struct btrfs_extent_item);
3289                         flags = btrfs_extent_flags(l, extent);
3290                         generation = btrfs_extent_generation(l, extent);
3291
3292                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3293                             (key.objectid < logical ||
3294                              key.objectid + bytes >
3295                              logical + map->stripe_len)) {
3296                                 btrfs_err(fs_info,
3297                                            "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3298                                        key.objectid, logical);
3299                                 spin_lock(&sctx->stat_lock);
3300                                 sctx->stat.uncorrectable_errors++;
3301                                 spin_unlock(&sctx->stat_lock);
3302                                 goto next;
3303                         }
3304
3305 again:
3306                         extent_logical = key.objectid;
3307                         extent_len = bytes;
3308
3309                         /*
3310                          * trim extent to this stripe
3311                          */
3312                         if (extent_logical < logical) {
3313                                 extent_len -= logical - extent_logical;
3314                                 extent_logical = logical;
3315                         }
3316                         if (extent_logical + extent_len >
3317                             logical + map->stripe_len) {
3318                                 extent_len = logical + map->stripe_len -
3319                                              extent_logical;
3320                         }
3321
3322                         extent_physical = extent_logical - logical + physical;
3323                         extent_dev = scrub_dev;
3324                         extent_mirror_num = mirror_num;
3325                         if (sctx->is_dev_replace)
3326                                 scrub_remap_extent(fs_info, extent_logical,
3327                                                    extent_len, &extent_physical,
3328                                                    &extent_dev,
3329                                                    &extent_mirror_num);
3330
3331                         ret = btrfs_lookup_csums_range(csum_root,
3332                                                        extent_logical,
3333                                                        extent_logical +
3334                                                        extent_len - 1,
3335                                                        &sctx->csum_list, 1);
3336                         if (ret)
3337                                 goto out;
3338
3339                         ret = scrub_extent(sctx, map, extent_logical, extent_len,
3340                                            extent_physical, extent_dev, flags,
3341                                            generation, extent_mirror_num,
3342                                            extent_logical - logical + physical);
3343
3344                         scrub_free_csums(sctx);
3345
3346                         if (ret)
3347                                 goto out;
3348
3349                         if (extent_logical + extent_len <
3350                             key.objectid + bytes) {
3351                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3352                                         /*
3353                                          * loop until we find next data stripe
3354                                          * or we have finished all stripes.
3355                                          */
3356 loop:
3357                                         physical += map->stripe_len;
3358                                         ret = get_raid56_logic_offset(physical,
3359                                                         num, map, &logical,
3360                                                         &stripe_logical);
3361                                         logical += base;
3362
3363                                         if (ret && physical < physical_end) {
3364                                                 stripe_logical += base;
3365                                                 stripe_end = stripe_logical +
3366                                                                 increment;
3367                                                 ret = scrub_raid56_parity(sctx,
3368                                                         map, scrub_dev, ppath,
3369                                                         stripe_logical,
3370                                                         stripe_end);
3371                                                 if (ret)
3372                                                         goto out;
3373                                                 goto loop;
3374                                         }
3375                                 } else {
3376                                         physical += map->stripe_len;
3377                                         logical += increment;
3378                                 }
3379                                 if (logical < key.objectid + bytes) {
3380                                         cond_resched();
3381                                         goto again;
3382                                 }
3383
3384                                 if (physical >= physical_end) {
3385                                         stop_loop = 1;
3386                                         break;
3387                                 }
3388                         }
3389 next:
3390                         path->slots[0]++;
3391                 }
3392                 btrfs_release_path(path);
3393 skip:
3394                 logical += increment;
3395                 physical += map->stripe_len;
3396                 spin_lock(&sctx->stat_lock);
3397                 if (stop_loop)
3398                         sctx->stat.last_physical = map->stripes[num].physical +
3399                                                    length;
3400                 else
3401                         sctx->stat.last_physical = physical;
3402                 spin_unlock(&sctx->stat_lock);
3403                 if (stop_loop)
3404                         break;
3405         }
3406 out:
3407         /* push queued extents */
3408         scrub_submit(sctx);
3409         mutex_lock(&sctx->wr_lock);
3410         scrub_wr_submit(sctx);
3411         mutex_unlock(&sctx->wr_lock);
3412
3413         blk_finish_plug(&plug);
3414         btrfs_free_path(path);
3415         btrfs_free_path(ppath);
3416         return ret < 0 ? ret : 0;
3417 }
3418
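/*
 * Scrub the portion of the chunk at @chunk_offset that lives on @scrub_dev:
 * look up the chunk mapping and run scrub_stripe() for every stripe of the
 * chunk whose device and physical offset match the device extent being
 * scrubbed.
 */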
3419 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3420                                           struct btrfs_device *scrub_dev,
3421                                           u64 chunk_offset, u64 length,
3422                                           u64 dev_offset,
3423                                           struct btrfs_block_group *cache)
3424 {
3425         struct btrfs_fs_info *fs_info = sctx->fs_info;
3426         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
3427         struct map_lookup *map;
3428         struct extent_map *em;
3429         int i;
3430         int ret = 0;
3431
3432         read_lock(&map_tree->lock);
3433         em = lookup_extent_mapping(map_tree, chunk_offset, 1);
3434         read_unlock(&map_tree->lock);
3435
3436         if (!em) {
3437                 /*
3438                  * Might have been an unused block group deleted by the cleaner
3439                  * kthread or relocation.
3440                  */
3441                 spin_lock(&cache->lock);
3442                 if (!cache->removed)
3443                         ret = -EINVAL;
3444                 spin_unlock(&cache->lock);
3445
3446                 return ret;
3447         }
3448
3449         map = em->map_lookup;
3450         if (em->start != chunk_offset)
3451                 goto out;
3452
3453         if (em->len < length)
3454                 goto out;
3455
3456         for (i = 0; i < map->num_stripes; ++i) {
3457                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3458                     map->stripes[i].physical == dev_offset) {
3459                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3460                                            chunk_offset, length);
3461                         if (ret)
3462                                 goto out;
3463                 }
3464         }
3465 out:
3466         free_extent_map(em);
3467
3468         return ret;
3469 }
3470
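/*
 * Walk all DEV_EXTENT items of @scrub_dev in the device tree whose offsets
 * fall into [start, end) and scrub the chunk each of them belongs to, one
 * block group at a time.
 */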
3471 static noinline_for_stack
3472 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3473                            struct btrfs_device *scrub_dev, u64 start, u64 end)
3474 {
3475         struct btrfs_dev_extent *dev_extent = NULL;
3476         struct btrfs_path *path;
3477         struct btrfs_fs_info *fs_info = sctx->fs_info;
3478         struct btrfs_root *root = fs_info->dev_root;
3479         u64 length;
3480         u64 chunk_offset;
3481         int ret = 0;
3482         int ro_set;
3483         int slot;
3484         struct extent_buffer *l;
3485         struct btrfs_key key;
3486         struct btrfs_key found_key;
3487         struct btrfs_block_group *cache;
3488         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3489
3490         path = btrfs_alloc_path();
3491         if (!path)
3492                 return -ENOMEM;
3493
3494         path->reada = READA_FORWARD;
3495         path->search_commit_root = 1;
3496         path->skip_locking = 1;
3497
3498         key.objectid = scrub_dev->devid;
3499         key.offset = 0ull;
3500         key.type = BTRFS_DEV_EXTENT_KEY;
3501
3502         while (1) {
3503                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3504                 if (ret < 0)
3505                         break;
3506                 if (ret > 0) {
3507                         if (path->slots[0] >=
3508                             btrfs_header_nritems(path->nodes[0])) {
3509                                 ret = btrfs_next_leaf(root, path);
3510                                 if (ret < 0)
3511                                         break;
3512                                 if (ret > 0) {
3513                                         ret = 0;
3514                                         break;
3515                                 }
3516                         } else {
3517                                 ret = 0;
3518                         }
3519                 }
3520
3521                 l = path->nodes[0];
3522                 slot = path->slots[0];
3523
3524                 btrfs_item_key_to_cpu(l, &found_key, slot);
3525
3526                 if (found_key.objectid != scrub_dev->devid)
3527                         break;
3528
3529                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3530                         break;
3531
3532                 if (found_key.offset >= end)
3533                         break;
3534
3535                 if (found_key.offset < key.offset)
3536                         break;
3537
3538                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3539                 length = btrfs_dev_extent_length(l, dev_extent);
3540
3541                 if (found_key.offset + length <= start)
3542                         goto skip;
3543
3544                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3545
3546                 /*
3547                  * get a reference on the corresponding block group to prevent
3548                  * the chunk from going away while we scrub it
3549                  */
3550                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3551
3552                 /* Some chunks are removed but not yet committed to disk;
3553                  * continue scrubbing. */
3554                 if (!cache)
3555                         goto skip;
3556
3557                 /*
3558                  * We need to call btrfs_inc_block_group_ro() with scrub paused,
3559                  * to avoid a deadlock caused by:
3560                  * btrfs_inc_block_group_ro()
3561                  * -> btrfs_wait_for_commit()
3562                  * -> btrfs_commit_transaction()
3563                  * -> btrfs_scrub_pause()
3564                  */
3565                 scrub_pause_on(fs_info);
3566
3567                 /*
3568                  * Don't do chunk preallocation for scrub.
3569                  *
3570                  * This is especially important for SYSTEM bgs, or else we can hit
3571                  * -EFBIG from btrfs_finish_chunk_alloc() like this:
3572                  * 1. The only SYSTEM bg is marked RO.
3573                  *    Since the SYSTEM bg is small, that's pretty common.
3574                  * 2. A new SYSTEM bg is allocated,
3575                  *    because the regular version of inc_block_group_ro allocates
3576                  *    a new chunk.
3577                  * 3. The new SYSTEM bg is empty and will get cleaned up.
3578                  *    Before cleanup really happens, it's marked RO again.
3579                  * 4. The empty SYSTEM bg gets scrubbed.
3580                  *    We go back to 2.
3581                  *
3582                  * This can easily boost the number of SYSTEM chunks if the cleaner
3583                  * thread can't be triggered fast enough, and use up all the space
3584                  * of btrfs_super_block::sys_chunk_array.
3584                  *
3585                  * For dev-replace, on the other hand, we need to try our best to
3586                  * mark the block group RO, to prevent a race between:
3587                  * - Write duplication
3588                  *   Contains the latest data
3589                  * - Scrub copy
3590                  *   Contains data from the commit tree
3591                  *
3592                  * If the target block group is not marked RO, nocow writes can
3593                  * be overwritten by the scrub copy, causing data corruption.
3594                  * So for dev-replace, scrub is not allowed to continue if a block
3595                  * group is not RO.
3596                  */
3597                 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3598                 if (ret == 0) {
3599                         ro_set = 1;
3600                 } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
3601                         /*
3602                          * btrfs_inc_block_group_ro() returns -ENOSPC when it
3603                          * fails to create a new chunk for metadata.
3604                          * That is not a problem for scrub, because
3605                          * metadata is always COWed, and our scrub pauses
3606                          * transaction commits.
3607                          */
3608                         ro_set = 0;
3609                 } else {
3610                         btrfs_warn(fs_info,
3611                                    "failed setting block group ro: %d", ret);
3612                         btrfs_put_block_group(cache);
3613                         scrub_pause_off(fs_info);
3614                         break;
3615                 }
3616
3617                 /*
3618                  * Now that the target block group is marked RO, wait for
3619                  * nocow writes to finish before dev-replace.
3620                  * COW is fine, as COW never overwrites extents in the commit tree.
3621                  */
3622                 if (sctx->is_dev_replace) {
3623                         btrfs_wait_nocow_writers(cache);
3624                         btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
3625                                         cache->length);
3626                 }
3627
3628                 scrub_pause_off(fs_info);
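                /*
                 * Publish the device extent we are about to scrub as the
                 * current dev-replace cursor window ([cursor_left,
                 * cursor_right)) and ask for the replace item to be written
                 * back.
                 */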
3629                 down_write(&dev_replace->rwsem);
3630                 dev_replace->cursor_right = found_key.offset + length;
3631                 dev_replace->cursor_left = found_key.offset;
3632                 dev_replace->item_needs_writeback = 1;
3633                 up_write(&dev_replace->rwsem);
3634
3635                 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3636                                   found_key.offset, cache);
3637
3638                 /*
3639                  * Flush and submit all pending read and write bios, then
3640                  * wait for them.
3641                  * Note that in the dev replace case, a read request causes
3642                  * write requests that are submitted in the read completion
3643                  * worker. Therefore in the current situation, it is required
3644                  * that all write requests are flushed, so that all read and
3645                  * write requests are really completed when bios_in_flight
3646                  * changes to 0.
3647                  */
3648                 sctx->flush_all_writes = true;
3649                 scrub_submit(sctx);
3650                 mutex_lock(&sctx->wr_lock);
3651                 scrub_wr_submit(sctx);
3652                 mutex_unlock(&sctx->wr_lock);
3653
3654                 wait_event(sctx->list_wait,
3655                            atomic_read(&sctx->bios_in_flight) == 0);
3656
3657                 scrub_pause_on(fs_info);
3658
3659                 /*
3660                  * This must be called before we decrease @scrub_paused.
3661                  * Make sure we don't block transaction commit while
3662                  * we are waiting for pending workers to finish.
3663                  */
3664                 wait_event(sctx->list_wait,
3665                            atomic_read(&sctx->workers_pending) == 0);
3666                 sctx->flush_all_writes = false;
3667
3668                 scrub_pause_off(fs_info);
3669
3670                 down_write(&dev_replace->rwsem);
3671                 dev_replace->cursor_left = dev_replace->cursor_right;
3672                 dev_replace->item_needs_writeback = 1;
3673                 up_write(&dev_replace->rwsem);
3674
3675                 if (ro_set)
3676                         btrfs_dec_block_group_ro(cache);
3677
3678                 /*
3679                  * We might have prevented the cleaner kthread from deleting
3680                  * this block group if it was already unused because we raced
3681                  * and set it to RO mode first. So add it back to the unused
3682                  * list, otherwise it might not ever be deleted unless a manual
3683                  * balance is triggered or it becomes used and unused again.
3684                  */
3685                 spin_lock(&cache->lock);
3686                 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3687                     cache->used == 0) {
3688                         spin_unlock(&cache->lock);
3689                         if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
3690                                 btrfs_discard_queue_work(&fs_info->discard_ctl,
3691                                                          cache);
3692                         else
3693                                 btrfs_mark_bg_unused(cache);
3694                 } else {
3695                         spin_unlock(&cache->lock);
3696                 }
3697
3698                 btrfs_put_block_group(cache);
3699                 if (ret)
3700                         break;
3701                 if (sctx->is_dev_replace &&
3702                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3703                         ret = -EIO;
3704                         break;
3705                 }
3706                 if (sctx->stat.malloc_errors > 0) {
3707                         ret = -ENOMEM;
3708                         break;
3709                 }
3710 skip:
3711                 key.offset = found_key.offset + length;
3712                 btrfs_release_path(path);
3713         }
3714
3715         btrfs_free_path(path);
3716
3717         return ret;
3718 }
3719
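/*
 * Scrub all superblock copies of @scrub_dev: the primary super at 64KiB and
 * the additional mirrors at the fixed btrfs_sb_offset() locations (64MiB and
 * 256GiB), skipping any copy that does not fit on the device.
 */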
3720 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3721                                            struct btrfs_device *scrub_dev)
3722 {
3723         int     i;
3724         u64     bytenr;
3725         u64     gen;
3726         int     ret;
3727         struct btrfs_fs_info *fs_info = sctx->fs_info;
3728
3729         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3730                 return -EIO;
3731
3732         /* Seed devices of a new filesystem have their own generation. */
3733         if (scrub_dev->fs_devices != fs_info->fs_devices)
3734                 gen = scrub_dev->generation;
3735         else
3736                 gen = fs_info->last_trans_committed;
3737
3738         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3739                 bytenr = btrfs_sb_offset(i);
3740                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3741                     scrub_dev->commit_total_bytes)
3742                         break;
3743
3744                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3745                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3746                                   NULL, 1, bytenr);
3747                 if (ret)
3748                         return ret;
3749         }
3750         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3751
3752         return 0;
3753 }
3754
3755 /*
3756  * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
3757  */
3758 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3759                                                 int is_dev_replace)
3760 {
3761         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3762         int max_active = fs_info->thread_pool_size;
3763
3764         lockdep_assert_held(&fs_info->scrub_lock);
3765
3766         if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
3767                 ASSERT(fs_info->scrub_workers == NULL);
3768                 fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
3769                                 flags, is_dev_replace ? 1 : max_active, 4);
3770                 if (!fs_info->scrub_workers)
3771                         goto fail_scrub_workers;
3772
3773                 ASSERT(fs_info->scrub_wr_completion_workers == NULL);
3774                 fs_info->scrub_wr_completion_workers =
3775                         btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3776                                               max_active, 2);
3777                 if (!fs_info->scrub_wr_completion_workers)
3778                         goto fail_scrub_wr_completion_workers;
3779
3780                 ASSERT(fs_info->scrub_parity_workers == NULL);
3781                 fs_info->scrub_parity_workers =
3782                         btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3783                                               max_active, 2);
3784                 if (!fs_info->scrub_parity_workers)
3785                         goto fail_scrub_parity_workers;
3786
3787                 refcount_set(&fs_info->scrub_workers_refcnt, 1);
3788         } else {
3789                 refcount_inc(&fs_info->scrub_workers_refcnt);
3790         }
3791         return 0;
3792
3793 fail_scrub_parity_workers:
3794         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3795 fail_scrub_wr_completion_workers:
3796         btrfs_destroy_workqueue(fs_info->scrub_workers);
3797 fail_scrub_workers:
3798         return -ENOMEM;
3799 }
3800
3801 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3802                     u64 end, struct btrfs_scrub_progress *progress,
3803                     int readonly, int is_dev_replace)
3804 {
3805         struct scrub_ctx *sctx;
3806         int ret;
3807         struct btrfs_device *dev;
3808         unsigned int nofs_flag;
3809         struct btrfs_workqueue *scrub_workers = NULL;
3810         struct btrfs_workqueue *scrub_wr_comp = NULL;
3811         struct btrfs_workqueue *scrub_parity = NULL;
3812
3813         if (btrfs_fs_closing(fs_info))
3814                 return -EAGAIN;
3815
3816         if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
3817                 /*
3818                  * The way scrub is implemented, it is unable to calculate
3819                  * the checksum in this case. Do not handle this situation
3820                  * at all because it won't ever happen.
3821                  */
3822                 btrfs_err(fs_info,
3823                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3824                        fs_info->nodesize,
3825                        BTRFS_STRIPE_LEN);
3826                 return -EINVAL;
3827         }
3828
3829         if (fs_info->sectorsize != PAGE_SIZE) {
3830                 /* not supported for data w/o checksums */
3831                 btrfs_err_rl(fs_info,
3832                            "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3833                        fs_info->sectorsize, PAGE_SIZE);
3834                 return -EINVAL;
3835         }
3836
3837         if (fs_info->nodesize >
3838             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3839             fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3840                 /*
3841                  * would exhaust the array bounds of pagev member in
3842                  * struct scrub_block
3843                  */
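                /*
                 * With the common 4KiB page size this caps nodesize and
                 * sectorsize at 16 * 4KiB = 64KiB per scrub_block.
                 */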
3844                 btrfs_err(fs_info,
3845                           "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3846                        fs_info->nodesize,
3847                        SCRUB_MAX_PAGES_PER_BLOCK,
3848                        fs_info->sectorsize,
3849                        SCRUB_MAX_PAGES_PER_BLOCK);
3850                 return -EINVAL;
3851         }
3852
3853         /* Allocate outside of device_list_mutex */
3854         sctx = scrub_setup_ctx(fs_info, is_dev_replace);
3855         if (IS_ERR(sctx))
3856                 return PTR_ERR(sctx);
3857
3858         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3859         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
3860         if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
3861                      !is_dev_replace)) {
3862                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3863                 ret = -ENODEV;
3864                 goto out_free_ctx;
3865         }
3866
3867         if (!is_dev_replace && !readonly &&
3868             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
3869                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3870                 btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
3871                                 rcu_str_deref(dev->name));
3872                 ret = -EROFS;
3873                 goto out_free_ctx;
3874         }
3875
3876         mutex_lock(&fs_info->scrub_lock);
3877         if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3878             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
3879                 mutex_unlock(&fs_info->scrub_lock);
3880                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3881                 ret = -EIO;
3882                 goto out_free_ctx;
3883         }
3884
3885         down_read(&fs_info->dev_replace.rwsem);
3886         if (dev->scrub_ctx ||
3887             (!is_dev_replace &&
3888              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3889                 up_read(&fs_info->dev_replace.rwsem);
3890                 mutex_unlock(&fs_info->scrub_lock);
3891                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3892                 ret = -EINPROGRESS;
3893                 goto out_free_ctx;
3894         }
3895         up_read(&fs_info->dev_replace.rwsem);
3896
3897         ret = scrub_workers_get(fs_info, is_dev_replace);
3898         if (ret) {
3899                 mutex_unlock(&fs_info->scrub_lock);
3900                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3901                 goto out_free_ctx;
3902         }
3903
3904         sctx->readonly = readonly;
3905         dev->scrub_ctx = sctx;
3906         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3907
3908         /*
3909          * By checking @scrub_pause_req here, we can avoid
3910          * the race between transaction commit and scrubbing.
3911          */
3912         __scrub_blocked_if_needed(fs_info);
3913         atomic_inc(&fs_info->scrubs_running);
3914         mutex_unlock(&fs_info->scrub_lock);
3915
3916         /*
3917          * In order to avoid deadlock with reclaim when there is a transaction
3918          * trying to pause scrub, make sure we use GFP_NOFS for all the
3919          * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
3920          * invoked by our callees. The pausing request is done when the
3921          * transaction commit starts, and it blocks the transaction until scrub
3922          * is paused (done at specific points at scrub_stripe() or right above
3923          * before incrementing fs_info->scrubs_running).
3924          */
3925         nofs_flag = memalloc_nofs_save();
3926         if (!is_dev_replace) {
3927                 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
3928                 /*
3929                  * By holding the device list mutex, we can
3930                  * kick off writing the super in log tree sync.
3931                  */
3932                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3933                 ret = scrub_supers(sctx, dev);
3934                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3935         }
3936
3937         if (!ret)
3938                 ret = scrub_enumerate_chunks(sctx, dev, start, end);
3939         memalloc_nofs_restore(nofs_flag);
3940
3941         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3942         atomic_dec(&fs_info->scrubs_running);
3943         wake_up(&fs_info->scrub_pause_wait);
3944
3945         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3946
3947         if (progress)
3948                 memcpy(progress, &sctx->stat, sizeof(*progress));
3949
3950         if (!is_dev_replace)
3951                 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3952                         ret ? "not finished" : "finished", devid, ret);
3953
3954         mutex_lock(&fs_info->scrub_lock);
3955         dev->scrub_ctx = NULL;
3956         if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
3957                 scrub_workers = fs_info->scrub_workers;
3958                 scrub_wr_comp = fs_info->scrub_wr_completion_workers;
3959                 scrub_parity = fs_info->scrub_parity_workers;
3960
3961                 fs_info->scrub_workers = NULL;
3962                 fs_info->scrub_wr_completion_workers = NULL;
3963                 fs_info->scrub_parity_workers = NULL;
3964         }
3965         mutex_unlock(&fs_info->scrub_lock);
3966
3967         btrfs_destroy_workqueue(scrub_workers);
3968         btrfs_destroy_workqueue(scrub_wr_comp);
3969         btrfs_destroy_workqueue(scrub_parity);
3970         scrub_put_ctx(sctx);
3971
3972         return ret;
3973
3974 out_free_ctx:
3975         scrub_free_ctx(sctx);
3976
3977         return ret;
3978 }
3979
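/*
 * Pause all running scrubs, typically around a transaction commit: raise
 * scrub_pause_req and wait until every running scrub has parked itself
 * (scrubs_paused == scrubs_running).  Paired with btrfs_scrub_continue().
 */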
3980 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3981 {
3982         mutex_lock(&fs_info->scrub_lock);
3983         atomic_inc(&fs_info->scrub_pause_req);
3984         while (atomic_read(&fs_info->scrubs_paused) !=
3985                atomic_read(&fs_info->scrubs_running)) {
3986                 mutex_unlock(&fs_info->scrub_lock);
3987                 wait_event(fs_info->scrub_pause_wait,
3988                            atomic_read(&fs_info->scrubs_paused) ==
3989                            atomic_read(&fs_info->scrubs_running));
3990                 mutex_lock(&fs_info->scrub_lock);
3991         }
3992         mutex_unlock(&fs_info->scrub_lock);
3993 }
3994
3995 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3996 {
3997         atomic_dec(&fs_info->scrub_pause_req);
3998         wake_up(&fs_info->scrub_pause_wait);
3999 }
4000
4001 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4002 {
4003         mutex_lock(&fs_info->scrub_lock);
4004         if (!atomic_read(&fs_info->scrubs_running)) {
4005                 mutex_unlock(&fs_info->scrub_lock);
4006                 return -ENOTCONN;
4007         }
4008
4009         atomic_inc(&fs_info->scrub_cancel_req);
4010         while (atomic_read(&fs_info->scrubs_running)) {
4011                 mutex_unlock(&fs_info->scrub_lock);
4012                 wait_event(fs_info->scrub_pause_wait,
4013                            atomic_read(&fs_info->scrubs_running) == 0);
4014                 mutex_lock(&fs_info->scrub_lock);
4015         }
4016         atomic_dec(&fs_info->scrub_cancel_req);
4017         mutex_unlock(&fs_info->scrub_lock);
4018
4019         return 0;
4020 }
4021
4022 int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
4023 {
4024         struct btrfs_fs_info *fs_info = dev->fs_info;
4025         struct scrub_ctx *sctx;
4026
4027         mutex_lock(&fs_info->scrub_lock);
4028         sctx = dev->scrub_ctx;
4029         if (!sctx) {
4030                 mutex_unlock(&fs_info->scrub_lock);
4031                 return -ENOTCONN;
4032         }
4033         atomic_inc(&sctx->cancel_req);
4034         while (dev->scrub_ctx) {
4035                 mutex_unlock(&fs_info->scrub_lock);
4036                 wait_event(fs_info->scrub_pause_wait,
4037                            dev->scrub_ctx == NULL);
4038                 mutex_lock(&fs_info->scrub_lock);
4039         }
4040         mutex_unlock(&fs_info->scrub_lock);
4041
4042         return 0;
4043 }
4044
4045 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4046                          struct btrfs_scrub_progress *progress)
4047 {
4048         struct btrfs_device *dev;
4049         struct scrub_ctx *sctx = NULL;
4050
4051         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4052         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
4053         if (dev)
4054                 sctx = dev->scrub_ctx;
4055         if (sctx)
4056                 memcpy(progress, &sctx->stat, sizeof(*progress));
4057         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4058
4059         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4060 }
4061
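/*
 * For dev-replace, map @extent_logical through btrfs_map_block() and hand
 * back the physical offset, device and mirror number of the first stripe,
 * so the extent is read from where it currently lives.  On any mapping
 * failure the caller's values are left untouched.
 */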
4062 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4063                                u64 extent_logical, u64 extent_len,
4064                                u64 *extent_physical,
4065                                struct btrfs_device **extent_dev,
4066                                int *extent_mirror_num)
4067 {
4068         u64 mapped_length;
4069         struct btrfs_bio *bbio = NULL;
4070         int ret;
4071
4072         mapped_length = extent_len;
4073         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4074                               &mapped_length, &bbio, 0);
4075         if (ret || !bbio || mapped_length < extent_len ||
4076             !bbio->stripes[0].dev->bdev) {
4077                 btrfs_put_bbio(bbio);
4078                 return;
4079         }
4080
4081         *extent_physical = bbio->stripes[0].physical;
4082         *extent_mirror_num = bbio->mirror_num;
4083         *extent_dev = bbio->stripes[0].dev;
4084         btrfs_put_bbio(bbio);
4085 }