
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 Apr 2012 16:30:07 +0000 (09:30 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 Apr 2012 16:30:07 +0000 (09:30 -0700)
Pull btrfs fixes from Chris Mason:
 "This has our collection of bug fixes.  I missed the last rc because I
  thought our patches were making NFS crash during my xfs test runs.
  Turns out it was an NFS client bug fixed by someone else while I tried
  to bisect it.

  All of these fixes are small, but some are fairly high impact.  The
  biggest are fixes for our mount -o remount handling, a deadlock due to
  GFP_KERNEL allocations in readdir, and a RAID10 error handling bug.

  This was tested against both 3.3 and Linus' master as of this morning."

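The readdir deadlock called out above is the classic filesystem allocation-context problem: a GFP_KERNEL allocation made while a filesystem lock is held can enter direct reclaim, and reclaim may re-enter the same filesystem and block on that very lock. The sketch below illustrates the pattern with hypothetical names (demo_fs_info and demo_readdir_step are invented for illustration, not btrfs code); the actual fix, visible in the fs/btrfs/inode.c hunk further down, simply removes the dentry pre-population from btrfs_real_readdir so that no allocation happens in that path at all.

/*
 * Hypothetical sketch, not btrfs code: demo_fs_info and demo_readdir_step
 * are made-up names used only to illustrate the allocation-context issue
 * described above.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>

struct demo_fs_info {
	struct mutex tree_lock;	/* stands in for a per-fs lock held during readdir */
};

static int demo_readdir_step(struct demo_fs_info *fs)
{
	void *scratch;

	mutex_lock(&fs->tree_lock);

	/*
	 * A GFP_KERNEL allocation here may enter direct reclaim; reclaim
	 * can try to write back dirty pages of this same filesystem and
	 * block on tree_lock, deadlocking against ourselves.  GFP_NOFS
	 * tells the allocator to stay out of filesystem reclaim while
	 * the lock is held.
	 */
	scratch = kzalloc(64, GFP_NOFS);
	if (!scratch) {
		mutex_unlock(&fs->tree_lock);
		return -ENOMEM;
	}

	/* ... look up and emit one directory entry ... */

	kfree(scratch);
	mutex_unlock(&fs->tree_lock);
	return 0;
}
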
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (26 commits)
  Btrfs: reduce lock contention during extent insertion
  Btrfs: avoid deadlocks from GFP_KERNEL allocations during btrfs_real_readdir
  Btrfs: Fix space checking during fs resize
  Btrfs: fix block_rsv and space_info lock ordering
  Btrfs: Prevent root_list corruption
  Btrfs: fix repair code for RAID10
  Btrfs: do not start delalloc inodes during sync
  Btrfs: fix that check_int_data mount option was ignored
  Btrfs: don't count CRC or header errors twice while scrubbing
  Btrfs: fix btrfs_ioctl_dev_info() crash on missing device
  btrfs: don't return EINTR
  Btrfs: double unlock bug in error handling
  Btrfs: always store the mirror we read the eb from
  fs/btrfs/volumes.c: add missing free_fs_devices
  btrfs: fix early abort in 'remount'
  Btrfs: fix max chunk size check in chunk allocator
  Btrfs: add missing read locks in backref.c
  Btrfs: don't call free_extent_buffer twice in iterate_irefs
  Btrfs: Make free_ipath() deal gracefully with NULL pointers
  Btrfs: avoid possible use-after-free in clear_extent_bit()
  ...

fs/btrfs/ctree.h
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/btrfs/super.c

diff --combined fs/btrfs/ctree.h
@@@ -1078,7 -1078,7 +1078,7 @@@ struct btrfs_fs_info 
         * is required instead of the faster short fsync log commits
         */
        u64 last_trans_log_full_commit;
-       unsigned long mount_opt:21;
+       unsigned long mount_opt;
        unsigned long compress_type:4;
        u64 max_inline;
        u64 alloc_start;
@@@ -2166,7 -2166,7 +2166,7 @@@ BTRFS_SETGET_STACK_FUNCS(root_last_snap
  
  static inline bool btrfs_root_readonly(struct btrfs_root *root)
  {
 -      return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
 +      return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
  }
  
  /* struct btrfs_root_backup */
diff --combined fs/btrfs/extent_io.c
@@@ -402,20 -402,28 +402,28 @@@ static int split_state(struct extent_io
        return 0;
  }
  
+ static struct extent_state *next_state(struct extent_state *state)
+ {
+       struct rb_node *next = rb_next(&state->rb_node);
+       if (next)
+               return rb_entry(next, struct extent_state, rb_node);
+       else
+               return NULL;
+ }
  /*
   * utility function to clear some bits in an extent state struct.
-  * it will optionally wake up any one waiting on this state (wake == 1), or
-  * forcibly remove the state from the tree (delete == 1).
+  * it will optionally wake up any one waiting on this state (wake == 1)
   *
   * If no bits are set on the state struct after clearing things, the
   * struct is freed and removed from the tree
   */
- static int clear_state_bit(struct extent_io_tree *tree,
-                           struct extent_state *state,
-                           int *bits, int wake)
+ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
+                                           struct extent_state *state,
+                                           int *bits, int wake)
  {
+       struct extent_state *next;
        int bits_to_clear = *bits & ~EXTENT_CTLBITS;
-       int ret = state->state & bits_to_clear;
  
        if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
        if (wake)
                wake_up(&state->wq);
        if (state->state == 0) {
+               next = next_state(state);
                if (state->tree) {
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                }
        } else {
                merge_state(tree, state);
+               next = next_state(state);
        }
-       return ret;
+       return next;
  }
  
  static struct extent_state *
@@@ -476,7 -486,6 +486,6 @@@ int clear_extent_bit(struct extent_io_t
        struct extent_state *state;
        struct extent_state *cached;
        struct extent_state *prealloc = NULL;
-       struct rb_node *next_node;
        struct rb_node *node;
        u64 last_end;
        int err;
@@@ -528,14 -537,11 +537,11 @@@ hit_next
        WARN_ON(state->end < start);
        last_end = state->end;
  
-       if (state->end < end && !need_resched())
-               next_node = rb_next(&state->rb_node);
-       else
-               next_node = NULL;
        /* the state doesn't have the wanted bits, go ahead */
-       if (!(state->state & bits))
+       if (!(state->state & bits)) {
+               state = next_state(state);
                goto next;
+       }
  
        /*
         *     | ---- desired range ---- |
                goto out;
        }
  
-       clear_state_bit(tree, state, &bits, wake);
+       state = clear_state_bit(tree, state, &bits, wake);
  next:
        if (last_end == (u64)-1)
                goto out;
        start = last_end + 1;
-       if (start <= end && next_node) {
-               state = rb_entry(next_node, struct extent_state,
-                                rb_node);
+       if (start <= end && state && !need_resched())
                goto hit_next;
-       }
        goto search_again;
  
  out:
@@@ -2301,7 -2304,7 +2304,7 @@@ static void end_bio_extent_readpage(str
        u64 start;
        u64 end;
        int whole_page;
-       int failed_mirror;
+       int mirror;
        int ret;
  
        if (err)
                }
                spin_unlock(&tree->lock);
  
+               mirror = (int)(unsigned long)bio->bi_bdev;
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
-                                                             state);
+                                                             state, mirror);
                        if (ret)
                                uptodate = 0;
                        else
                                clean_io_failure(start, page);
                }
  
-               if (!uptodate)
-                       failed_mirror = (int)(unsigned long)bio->bi_bdev;
                if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
-                       ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
+                       ret = tree->ops->readpage_io_failed_hook(page, mirror);
                        if (!ret && !err &&
                            test_bit(BIO_UPTODATE, &bio->bi_flags))
                                uptodate = 1;
                         * can't handle the error it will return -EIO and we
                         * remain responsible for that page.
                         */
-                       ret = bio_readpage_error(bio, page, start, end,
-                                                       failed_mirror, NULL);
+                       ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
                        if (ret == 0) {
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
@@@ -2612,10 -2612,10 +2612,10 @@@ static int __extent_read_full_page(stru
  
                if (zero_offset) {
                        iosize = PAGE_CACHE_SIZE - zero_offset;
 -                      userpage = kmap_atomic(page, KM_USER0);
 +                      userpage = kmap_atomic(page);
                        memset(userpage + zero_offset, 0, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage, KM_USER0);
 +                      kunmap_atomic(userpage);
                }
        }
        while (cur <= end) {
                        struct extent_state *cached = NULL;
  
                        iosize = PAGE_CACHE_SIZE - pg_offset;
 -                      userpage = kmap_atomic(page, KM_USER0);
 +                      userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage, KM_USER0);
 +                      kunmap_atomic(userpage);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur, cur + iosize - 1,
                        char *userpage;
                        struct extent_state *cached = NULL;
  
 -                      userpage = kmap_atomic(page, KM_USER0);
 +                      userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage, KM_USER0);
 +                      kunmap_atomic(userpage);
  
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
@@@ -2823,10 -2823,10 +2823,10 @@@ static int __extent_writepage(struct pa
        if (page->index == end_index) {
                char *userpage;
  
 -              userpage = kmap_atomic(page, KM_USER0);
 +              userpage = kmap_atomic(page);
                memset(userpage + pg_offset, 0,
                       PAGE_CACHE_SIZE - pg_offset);
 -              kunmap_atomic(userpage, KM_USER0);
 +              kunmap_atomic(userpage);
                flush_dcache_page(page);
        }
        pg_offset = 0;
@@@ -4462,7 -4462,7 +4462,7 @@@ int read_extent_buffer_pages(struct ext
        }
  
        clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
-       eb->failed_mirror = 0;
+       eb->read_mirror = 0;
        atomic_set(&eb->io_pages, num_reads);
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
diff --combined fs/btrfs/inode.c
@@@ -172,9 -172,9 +172,9 @@@ static noinline int insert_inline_exten
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);
  
 -                      kaddr = kmap_atomic(cpage, KM_USER0);
 +                      kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
 -                      kunmap_atomic(kaddr, KM_USER0);
 +                      kunmap_atomic(kaddr);
  
                        i++;
                        ptr += cur_size;
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
 -              kaddr = kmap_atomic(page, KM_USER0);
 +              kaddr = kmap_atomic(page);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
 -              kunmap_atomic(kaddr, KM_USER0);
 +              kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
@@@ -426,10 -426,10 +426,10 @@@ again
                         * sending it down to disk
                         */
                        if (offset) {
 -                              kaddr = kmap_atomic(page, KM_USER0);
 +                              kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
 -                              kunmap_atomic(kaddr, KM_USER0);
 +                              kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
@@@ -1947,7 -1947,7 +1947,7 @@@ static int btrfs_writepage_end_io_hook(
   * extent_io.c will try to find good copies for us.
   */
  static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
-                              struct extent_state *state)
+                              struct extent_state *state, int mirror)
  {
        size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
        struct inode *inode = page->mapping->host;
        } else {
                ret = get_state_private(io_tree, start, &private);
        }
 -      kaddr = kmap_atomic(page, KM_USER0);
 +      kaddr = kmap_atomic(page);
        if (ret)
                goto zeroit;
  
        if (csum != private)
                goto zeroit;
  
 -      kunmap_atomic(kaddr, KM_USER0);
 +      kunmap_atomic(kaddr);
  good:
        return 0;
  
@@@ -2000,7 -2000,7 +2000,7 @@@ zeroit
                       (unsigned long long)private);
        memset(kaddr + offset, 1, end - start + 1);
        flush_dcache_page(page);
 -      kunmap_atomic(kaddr, KM_USER0);
 +      kunmap_atomic(kaddr);
        if (private == 0)
                return 0;
        return -EIO;
@@@ -4069,7 -4069,7 +4069,7 @@@ static struct inode *new_simple_dir(str
        BTRFS_I(inode)->dummy_inode = 1;
  
        inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
-       inode->i_op = &simple_dir_inode_operations;
+       inode->i_op = &btrfs_dir_ro_inode_operations;
        inode->i_fop = &simple_dir_operations;
        inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@@ -4140,14 -4140,18 +4140,18 @@@ struct inode *btrfs_lookup_dentry(struc
  static int btrfs_dentry_delete(const struct dentry *dentry)
  {
        struct btrfs_root *root;
+       struct inode *inode = dentry->d_inode;
  
-       if (!dentry->d_inode && !IS_ROOT(dentry))
-               dentry = dentry->d_parent;
+       if (!inode && !IS_ROOT(dentry))
+               inode = dentry->d_parent->d_inode;
  
-       if (dentry->d_inode) {
-               root = BTRFS_I(dentry->d_inode)->root;
+       if (inode) {
+               root = BTRFS_I(inode)->root;
                if (btrfs_root_refs(&root->root_item) == 0)
                        return 1;
+               if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+                       return 1;
        }
        return 0;
  }
@@@ -4188,7 -4192,6 +4192,6 @@@ static int btrfs_real_readdir(struct fi
        struct btrfs_path *path;
        struct list_head ins_list;
        struct list_head del_list;
-       struct qstr q;
        int ret;
        struct extent_buffer *leaf;
        int slot;
  
                while (di_cur < di_total) {
                        struct btrfs_key location;
-                       struct dentry *tmp;
  
                        if (verify_dir_item(root, leaf, di))
                                break;
                        d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
                        btrfs_dir_item_key_to_cpu(leaf, di, &location);
  
-                       q.name = name_ptr;
-                       q.len = name_len;
-                       q.hash = full_name_hash(q.name, q.len);
-                       tmp = d_lookup(filp->f_dentry, &q);
-                       if (!tmp) {
-                               struct btrfs_key *newkey;
-                               newkey = kzalloc(sizeof(struct btrfs_key),
-                                                GFP_NOFS);
-                               if (!newkey)
-                                       goto no_dentry;
-                               tmp = d_alloc(filp->f_dentry, &q);
-                               if (!tmp) {
-                                       kfree(newkey);
-                                       dput(tmp);
-                                       goto no_dentry;
-                               }
-                               memcpy(newkey, &location,
-                                      sizeof(struct btrfs_key));
-                               tmp->d_fsdata = newkey;
-                               tmp->d_flags |= DCACHE_NEED_LOOKUP;
-                               d_rehash(tmp);
-                               dput(tmp);
-                       } else {
-                               dput(tmp);
-                       }
- no_dentry:
                        /* is this a reference to our own snapshot? If so
-                        * skip it
+                        * skip it.
+                        *
+                        * In contrast to old kernels, we insert the snapshot's
+                        * dir item and dir index after it has been created, so
+                        * we won't find a reference to our own snapshot. We
+                        * still keep the following code for backward
+                        * compatibility.
                         */
                        if (location.type == BTRFS_ROOT_ITEM_KEY &&
                            location.objectid == root->root_key.objectid) {
@@@ -5097,12 -5079,12 +5079,12 @@@ static noinline int uncompress_inline(s
        ret = btrfs_decompress(compress_type, tmp, page,
                               extent_offset, inline_size, max_size);
        if (ret) {
 -              char *kaddr = kmap_atomic(page, KM_USER0);
 +              char *kaddr = kmap_atomic(page);
                unsigned long copy_size = min_t(u64,
                                  PAGE_CACHE_SIZE - pg_offset,
                                  max_size - extent_offset);
                memset(kaddr + pg_offset, 0, copy_size);
 -              kunmap_atomic(kaddr, KM_USER0);
 +              kunmap_atomic(kaddr);
        }
        kfree(tmp);
        return 0;
@@@ -5880,11 -5862,11 +5862,11 @@@ static void btrfs_endio_direct_read(str
                        unsigned long flags;
  
                        local_irq_save(flags);
 -                      kaddr = kmap_atomic(page, KM_IRQ0);
 +                      kaddr = kmap_atomic(page);
                        csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
                                               csum, bvec->bv_len);
                        btrfs_csum_final(csum, (char *)&csum);
 -                      kunmap_atomic(kaddr, KM_IRQ0);
 +                      kunmap_atomic(kaddr);
                        local_irq_restore(flags);
  
                        flush_dcache_page(bvec->bv_page);
diff --combined fs/btrfs/scrub.c
@@@ -1091,7 -1091,7 +1091,7 @@@ static void scrub_recheck_block_checksu
        if (is_metadata) {
                struct btrfs_header *h;
  
 -              mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
 +              mapped_buffer = kmap_atomic(sblock->pagev[0].page);
                h = (struct btrfs_header *)mapped_buffer;
  
                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
                if (!have_csum)
                        return;
  
 -              mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
 +              mapped_buffer = kmap_atomic(sblock->pagev[0].page);
        }
  
        for (page_num = 0;;) {
                        crc = btrfs_csum_data(root, mapped_buffer, crc,
                                              PAGE_SIZE);
  
 -              kunmap_atomic(mapped_buffer, KM_USER0);
 +              kunmap_atomic(mapped_buffer);
                page_num++;
                if (page_num >= sblock->page_count)
                        break;
                BUG_ON(!sblock->pagev[page_num].page);
  
 -              mapped_buffer = kmap_atomic(sblock->pagev[page_num].page,
 -                                          KM_USER0);
 +              mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
        }
  
        btrfs_csum_final(crc, calculated_csum);
@@@ -1234,7 -1235,7 +1234,7 @@@ static int scrub_checksum_data(struct s
  
        on_disk_csum = sblock->pagev[0].csum;
        page = sblock->pagev[0].page;
 -      buffer = kmap_atomic(page, KM_USER0);
 +      buffer = kmap_atomic(page);
  
        len = sdev->sectorsize;
        index = 0;
                u64 l = min_t(u64, len, PAGE_SIZE);
  
                crc = btrfs_csum_data(root, buffer, crc, l);
 -              kunmap_atomic(buffer, KM_USER0);
 +              kunmap_atomic(buffer);
                len -= l;
                if (len == 0)
                        break;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
 -              buffer = kmap_atomic(page, KM_USER0);
 +              buffer = kmap_atomic(page);
        }
  
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, on_disk_csum, sdev->csum_size))
                fail = 1;
  
-       if (fail) {
-               spin_lock(&sdev->stat_lock);
-               ++sdev->stat.csum_errors;
-               spin_unlock(&sdev->stat_lock);
-       }
        return fail;
  }
  
@@@ -1286,7 -1281,7 +1280,7 @@@ static int scrub_checksum_tree_block(st
  
        BUG_ON(sblock->page_count < 1);
        page = sblock->pagev[0].page;
 -      mapped_buffer = kmap_atomic(page, KM_USER0);
 +      mapped_buffer = kmap_atomic(page);
        h = (struct btrfs_header *)mapped_buffer;
        memcpy(on_disk_csum, h->csum, sdev->csum_size);
  
                u64 l = min_t(u64, len, mapped_size);
  
                crc = btrfs_csum_data(root, p, crc, l);
 -              kunmap_atomic(mapped_buffer, KM_USER0);
 +              kunmap_atomic(mapped_buffer);
                len -= l;
                if (len == 0)
                        break;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
 -              mapped_buffer = kmap_atomic(page, KM_USER0);
 +              mapped_buffer = kmap_atomic(page);
                mapped_size = PAGE_SIZE;
                p = mapped_buffer;
        }
        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
                ++crc_fail;
  
-       if (crc_fail || fail) {
-               spin_lock(&sdev->stat_lock);
-               if (crc_fail)
-                       ++sdev->stat.csum_errors;
-               if (fail)
-                       ++sdev->stat.verify_errors;
-               spin_unlock(&sdev->stat_lock);
-       }
        return fail || crc_fail;
  }
  
@@@ -1366,7 -1352,7 +1351,7 @@@ static int scrub_checksum_super(struct 
  
        BUG_ON(sblock->page_count < 1);
        page = sblock->pagev[0].page;
 -      mapped_buffer = kmap_atomic(page, KM_USER0);
 +      mapped_buffer = kmap_atomic(page);
        s = (struct btrfs_super_block *)mapped_buffer;
        memcpy(on_disk_csum, s->csum, sdev->csum_size);
  
                u64 l = min_t(u64, len, mapped_size);
  
                crc = btrfs_csum_data(root, p, crc, l);
 -              kunmap_atomic(mapped_buffer, KM_USER0);
 +              kunmap_atomic(mapped_buffer);
                len -= l;
                if (len == 0)
                        break;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
 -              mapped_buffer = kmap_atomic(page, KM_USER0);
 +              mapped_buffer = kmap_atomic(page);
                mapped_size = PAGE_SIZE;
                p = mapped_buffer;
        }
diff --combined fs/btrfs/super.c
@@@ -755,6 -755,7 +755,6 @@@ static int btrfs_fill_super(struct supe
                            void *data, int silent)
  {
        struct inode *inode;
 -      struct dentry *root_dentry;
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_key key;
        int err;
                goto fail_close;
        }
  
 -      root_dentry = d_alloc_root(inode);
 -      if (!root_dentry) {
 -              iput(inode);
 +      sb->s_root = d_make_root(inode);
 +      if (!sb->s_root) {
                err = -ENOMEM;
                goto fail_close;
        }
  
 -      sb->s_root = root_dentry;
 -
        save_mount_options(sb, data);
        cleancache_init_fs(sb);
        sb->s_flags |= MS_ACTIVE;
@@@ -815,7 -819,6 +815,6 @@@ int btrfs_sync_fs(struct super_block *s
                return 0;
        }
  
-       btrfs_start_delalloc_inodes(root, 0);
        btrfs_wait_ordered_extents(root, 0, 0);
  
        trans = btrfs_start_transaction(root, 0);
@@@ -1148,13 -1151,15 +1147,15 @@@ static int btrfs_remount(struct super_b
                if (ret)
                        goto restore;
        } else {
-               if (fs_info->fs_devices->rw_devices == 0)
+               if (fs_info->fs_devices->rw_devices == 0) {
                        ret = -EACCES;
                        goto restore;
+               }
  
-               if (btrfs_super_log_root(fs_info->super_copy) != 0)
+               if (btrfs_super_log_root(fs_info->super_copy) != 0) {
                        ret = -EINVAL;
                        goto restore;
+               }
  
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret)