// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;
	/*
	 * while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/* LRU list for the stripe cache */
	struct list_head stripe_cache;

	/* for scheduling work in the helper threads */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/*
	 * also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged. The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list. This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers. We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/*
	 * pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list. Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/* bitmap to record which horizontal stripe has data */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);
static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array. We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit. We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
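
/*
 * For example (assuming 4K pages and the usual 64K stripe unit): full
 * stripes starting at logical 0x10000000 and 0x10010000 hash as
 * hash_64(0x1000, 11) and hash_64(0x1001, 11), so the bits that differ
 * survive the >> 16 and the stripes spread over all
 * 1 << BTRFS_STRIPE_HASH_TABLE_BITS (2048) buckets.
 */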
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination. The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache. The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/*
	 * hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/*
		 * if the bio list isn't empty, this rbio is
		 * still involved in an IO. We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache. It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api. It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
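
/*
 * For example, with MAX_XOR_BLOCKS at its usual value of 4 and
 * src_cnt == 10, the loop above calls xor_blocks() three times,
 * covering sources 0-3, 4-7 and 8-9, each time accumulating into the
 * same dest page stored just past the last source in the pages array.
 */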
/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * A parity scrub has read the full stripe from the drive and is
	 * checking and repairing the parity and writing the new results.
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
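
/*
 * Index math, for example: a raid6 rbio with 3 data stripes
 * (nr_data == 3, real_stripes == 5) and stripe_npages == 16 keeps
 * page 5 of the Q stripe at stripe_pages[4 * 16 + 5], which
 * rbio_qstripe_page(rbio, 5) reaches as stripe (nr_data + 1) == 4.
 */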
/*
 * The first stripe in the table for a logical address
 * has the lock. rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet. The rbio is given
 * the lock and 0 is returned. The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner. The rbio is freed and the IO will
 * start automatically along with the existing rbio. 1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list. When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission. If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones. We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed. If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}
/*
 * end io function used by finish_rmw. When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else. This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
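
/*
 * For example, a raid6 layout with 3 data stripes (nr_stripes == 5
 * including P and Q) and a 64K stripe_len on 4K pages needs
 * DIV_ROUND_UP(64K, 4K) * 5 == 80 pages for the full stripe.
 */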
/*
 * allocation and initial setup for the btrfs_raid_bio. Note:
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
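
/*
 * The single allocation made above is laid out as:
 *
 *   [struct btrfs_raid_bio][stripe_pages][bio_pages]
 *   [finish_pointers][dbitmap][finish_pbitmap]
 *
 * so one kzalloc()/kfree() pair covers the rbio and all of its
 * variable-sized arrays, and CONSUME_ALLOC just walks p through the
 * trailing memory in that order.
 */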
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	btrfs_io_bio(bio)->device = stripe->dev;
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk. This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction. The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations. We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	/*
	 * at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon. If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (has_qstripe) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;

			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;

			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
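
/*
 * To recap the parity math finish_rmw() relies on: for raid5 the
 * parity page is P = D0 ^ D1 ^ ... ^ Dn-1, produced by the
 * copy_page() + run_xor() pair; for raid6, raid6_call.gen_syndrome()
 * additionally fills Q with the Reed-Solomon syndrome
 * Q = g^0*D0 ^ g^1*D1 ^ ... over GF(2^8), which is what later allows
 * rebuilding any two missing stripes.
 */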
/*
 * helper to find the stripe number for a given bio. Used to figure out which
 * stripe has failed. This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping). Used to figure out which stripe has
 * failed. This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector << 9;
	int i;

	for (i = 0; i < rbio->nr_data; i++) {
		u64 stripe_start = rbio->bbio->raid_map[i];

		if (in_range(logical, stripe_start, rbio->stripe_len))
			return i;
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate. It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all)
		SetPageUptodate(bvec->bv_page);
}
/*
 * end io for the read phase of the rmw cycle. All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
/*
 * the stripe must be locked by the caller. It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;

			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}
/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe. So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list. When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
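
/*
 * Sorting by the first bio's sector means rbios touching the same
 * full stripe become neighbours in the plug list, so the merge loop
 * in run_plug() below only ever has to compare adjacent entries.
 */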
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			int ret;

			/* we have a full stripe, send it down */
			ret = full_stripe_write(cur);
			BUG_ON(ret);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
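
/*
 * Net effect of the plugging above: partial stripe writes submitted
 * inside one blk_plug accumulate on plug->rbio_list and get sorted
 * and merged in run_plug() at unplug time, while full-stripe writes
 * skip the detour and go straight to full_stripe_write().
 */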
/*
 * all parity reconstruction happens here. We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/*
		 * setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb)
				swap(faila, failb);

			/*
			 * if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe. raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			copy_page(pointers[faila], pointers[rbio->nr_data]);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}

		/*
		 * if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate. This way finish_rmw will
		 * know they can be trusted. If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with on-disk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
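
/*
 * Rough summary of the rebuild cases handled above (D = data stripe,
 * P/Q = parity stripes):
 *
 *   one bad D (raid5 or raid6):   xor P with the surviving data
 *   bad D + bad Q (raid6):        same xor path; Q is regenerated
 *                                 later if needed
 *   bad D + bad P (raid6):        raid6_datap_recov()
 *   two bad D (raid6):            raid6_2data_recov()
 *   bad P + bad Q, or P alone
 *   (raid6):                      unrecoverable here, BLK_STS_IOERR
 */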
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed. Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			return 0;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}
/*
 * the main entry point for reads from the higher layers. This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe. 'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits. We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
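
/*
 * Worked example for the mirror_num mapping above, on a 4-device
 * raid6 (real_stripes == 4, nr_data == 2): mirror_num 3 yields
 * failb = 4 - 2 = 2, the P stripe; mirror_num 4 yields failb = 1, a
 * data stripe, stepped down by one more if it collides with faila.
 */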
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure that all the pages added to the
 * scrub/replace raid bio are correct and are not changed during the
 * scrub/replace; that is, those pages only hold metadata or file data
 * with checksums.
 */
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* For now we only support the case where sectorsize equals page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
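
/*
 * For instance, with 4K pages a scrub page for logical address
 * raid_map[0] + 0x5000 lands in bio_pages[5]; the asserts above only
 * check that the page falls inside the data portion of the stripe.
 */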
/*
 * We only scrub the parity for rows where we have correct data on the
 * same horizontal stripe, so we don't need to allocate pages for all
 * the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	struct page *page;
	int index;
	int bit;
	int i;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area
	 * of the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
		pointers[rbio->real_stripes - 1] = kmap(q_page);
	}

	atomic_set(&rbio->error, 0);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap(p_page);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	kunmap(p_page);
	__free_page(p_page);
	if (q_page) {
		kunmap(q_page);
		__free_page(q_page);
	}

writeback:
	/*
	 * time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk. This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction. The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the parity we're scrubbing to repair data,
		 * so our repair capability is reduced by one.
		 * (In the RAID5 case we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6. If the corrupted parity is
		 * the one being scrubbed, we can luckily use the other parity
		 * to repair the data; otherwise we cannot repair the data
		 * stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
/*
 * end io for the read phase of the scrub/replace cycle. All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;

			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}