* is required instead of the faster short fsync log commits
*/
u64 last_trans_log_full_commit;
- unsigned long mount_opt:21;
+ unsigned long mount_opt;
unsigned long compress_type:4;
u64 max_inline;
u64 alloc_start;
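
Widening mount_opt from a 21-bit bitfield to a full unsigned long leaves room for further BTRFS_MOUNT_* option flags. The field is only ever used through the mask-macro helpers (btrfs_set_opt()/btrfs_test_opt() in ctree.h); their shape is roughly this sketch, with an illustrative flag value:

    /* sketch of the flag-mask pattern behind the ctree.h helpers */
    #define EXAMPLE_MOUNT_FLAG	(1UL << 20)	/* illustrative only */

    static inline void set_opt(unsigned long *opts, unsigned long flag)
    {
            *opts |= flag;			/* enable a mount option */
    }

    static inline bool test_opt(unsigned long opts, unsigned long flag)
    {
            return (opts & flag) != 0;	/* query a mount option */
    }
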
static inline bool btrfs_root_readonly(struct btrfs_root *root)
{
- return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
+ return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}
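
root_item.flags comes straight off disk in little-endian byte order, so masking it with the CPU-order constant BTRFS_ROOT_SUBVOL_RDONLY returned garbage on big-endian machines. Converting the constant once with cpu_to_le64() makes the test correct everywhere and costs nothing at runtime on little-endian. The general pattern, as a minimal sketch:

    /* endian-safe test of an on-disk little-endian flags word: swap
     * the compile-time constant, never the loaded value */
    static inline bool le_flag_set(__le64 disk_flags, u64 flag)
    {
            return (disk_flags & cpu_to_le64(flag)) != 0;
    }
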
/* struct btrfs_root_backup */
return 0;
}
+ static struct extent_state *next_state(struct extent_state *state)
+ {
+ struct rb_node *next = rb_next(&state->rb_node);
+ if (next)
+ return rb_entry(next, struct extent_state, rb_node);
+ else
+ return NULL;
+ }
+
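
next_state() is a thin wrapper around the rbtree iterator: it hands back the in-order successor of an extent_state, or NULL at the end of the tree. It exists so callers can grab the successor before the current state is possibly freed, as clear_state_bit() does below. Illustrative use, assuming 'state' came from a prior tree lookup:

    /* walk the extent states in ascending range order */
    while (state) {
            /* ... examine state->start, state->end, state->state ... */
            state = next_state(state);
    }
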
/*
* utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1), or
- * forcibly remove the state from the tree (delete == 1).
+ * it will optionally wake up anyone waiting on this state (wake == 1)
*
* If no bits are set on the state struct after clearing things, the
* struct is freed and removed from the tree
*/
- static int clear_state_bit(struct extent_io_tree *tree,
- struct extent_state *state,
- int *bits, int wake)
+ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
+ struct extent_state *state,
+ int *bits, int wake)
{
+ struct extent_state *next;
int bits_to_clear = *bits & ~EXTENT_CTLBITS;
- int ret = state->state & bits_to_clear;
if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1;
if (wake)
wake_up(&state->wq);
if (state->state == 0) {
+ next = next_state(state);
if (state->tree) {
rb_erase(&state->rb_node, &tree->state);
state->tree = NULL;
}
} else {
merge_state(tree, state);
+ next = next_state(state);
}
- return ret;
+ return next;
}
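
Returning the successor instead of the cleared bit mask closes a use-after-free window: once the last bit is cleared, the state is erased from the tree and freed, so the caller must not touch it again to find its neighbour. The hazard and the fix, reduced to their shape:

    /* old shape -- 'state' may already be freed by the callee:
     *
     *	clear_state_bit(tree, state, &bits, wake);
     *	next = rb_next(&state->rb_node);	// use-after-free
     *
     * new shape -- the callee captures the successor before any free:
     *
     *	state = clear_state_bit(tree, state, &bits, wake);
     */
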
static struct extent_state *
struct extent_state *state;
struct extent_state *cached;
struct extent_state *prealloc = NULL;
- struct rb_node *next_node;
struct rb_node *node;
u64 last_end;
int err;
WARN_ON(state->end < start);
last_end = state->end;
- if (state->end < end && !need_resched())
- next_node = rb_next(&state->rb_node);
- else
- next_node = NULL;
-
/* the state doesn't have the wanted bits, go ahead */
- if (!(state->state & bits))
+ if (!(state->state & bits)) {
+ state = next_state(state);
goto next;
+ }
/*
* | ---- desired range ---- |
goto out;
}
- clear_state_bit(tree, state, &bits, wake);
+ state = clear_state_bit(tree, state, &bits, wake);
next:
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
- if (start <= end && next_node) {
- state = rb_entry(next_node, struct extent_state,
- rb_node);
+ if (start <= end && state && !need_resched())
goto hit_next;
- }
goto search_again;
out:
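
With the successor coming back from clear_state_bit(), __clear_extent_bit() no longer needs to cache rb_next() up front; the loop keeps consuming states while they stay in range and need_resched() is clear, otherwise it drops the lock and re-searches. Condensed control flow (a sketch; locking and state splitting omitted):

    while (state && start <= end && !need_resched()) {
            u64 last_end = state->end;

            state = clear_state_bit(tree, state, &bits, wake);
            if (last_end == (u64)-1)
                    break;		/* hit the end of the address space */
            start = last_end + 1;	/* advance past the cleared state */
    }
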
u64 start;
u64 end;
int whole_page;
- int failed_mirror;
+ int mirror;
int ret;
if (err)
}
spin_unlock(&tree->lock);
+ mirror = (int)(unsigned long)bio->bi_bdev;
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
- state);
+ state, mirror);
if (ret)
uptodate = 0;
else
clean_io_failure(start, page);
}
- if (!uptodate)
- failed_mirror = (int)(unsigned long)bio->bi_bdev;
-
if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
- ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
+ ret = tree->ops->readpage_io_failed_hook(page, mirror);
if (!ret && !err &&
test_bit(BIO_UPTODATE, &bio->bi_flags))
uptodate = 1;
* can't handle the error it will return -EIO and we
* remain responsible for that page.
*/
- ret = bio_readpage_error(bio, page, start, end,
- failed_mirror, NULL);
+ ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
if (ret == 0) {
uptodate =
test_bit(BIO_UPTODATE, &bio->bi_flags);
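
The mirror number is now recorded for every read, not only after a failure. The submission path smuggles it through bio->bi_bdev, which carries no useful device pointer by completion time, and the end_io handler recovers it before running the checksum hook, so both verification and repair know which copy was actually read. The convention, assuming the submission side of this same series:

    /* submit side (sketch): stash the mirror number in bi_bdev */
    bio->bi_bdev = (struct block_device *)(unsigned long)mirror_num;

    /* completion side: recover it before touching the pages */
    mirror = (int)(unsigned long)bio->bi_bdev;
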
if (zero_offset) {
iosize = PAGE_CACHE_SIZE - zero_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + zero_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
}
}
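
Every kmap_atomic()/kunmap_atomic() pair in this series drops its KM_USER0 slot argument: 3.4-era kernels replaced the fixed KM_* slots with stack-based atomic kmaps, so the map call takes only the page and the unmap only the address. The conversion is mechanical; the resulting idiom:

    /* stack-based atomic kmap: no slot to pick; if calls nest,
     * unmap in reverse order of mapping */
    char *kaddr = kmap_atomic(page);
    memset(kaddr + offset, 0, len);	/* offset/len are stand-ins */
    flush_dcache_page(page);
    kunmap_atomic(kaddr);
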
while (cur <= end) {
struct extent_state *cached = NULL;
iosize = PAGE_CACHE_SIZE - pg_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur, cur + iosize - 1,
char *userpage;
struct extent_state *cached = NULL;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
if (page->index == end_index) {
char *userpage;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0,
PAGE_CACHE_SIZE - pg_offset);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
flush_dcache_page(page);
}
pg_offset = 0;
}
clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
- eb->failed_mirror = 0;
+ eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
for (i = start_i; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
cur_size = min_t(unsigned long, compressed_size,
PAGE_CACHE_SIZE);
- kaddr = kmap_atomic(cpage, KM_USER0);
+ kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
i++;
ptr += cur_size;
page = find_get_page(inode->i_mapping,
start >> PAGE_CACHE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
offset = start & (PAGE_CACHE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
page_cache_release(page);
}
btrfs_mark_buffer_dirty(leaf);
* sending it down to disk
*/
if (offset) {
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
PAGE_CACHE_SIZE - offset);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
will_compress = 1;
}
* extent_io.c will try to find good copies for us.
*/
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
- struct extent_state *state)
+ struct extent_state *state, int mirror)
{
size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
struct inode *inode = page->mapping->host;
} else {
ret = get_state_private(io_tree, start, &private);
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (ret)
goto zeroit;
if (csum != private)
goto zeroit;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
good:
return 0;
(unsigned long long)private);
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (private == 0)
return 0;
return -EIO;
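
btrfs_readpage_end_io_hook() picks up the new mirror parameter of the readpage_end_io_hook operation, so a checksum failure can be attributed to the specific copy that was read. The updated hook contract looks roughly like this sketch:

    /* return 0 when the page verifies; nonzero sends the read down
     * the retry/repair path with a different mirror */
    int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
                                struct extent_state *state, int mirror);
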
BTRFS_I(inode)->dummy_inode = 1;
inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
- inode->i_op = &simple_dir_inode_operations;
+ inode->i_op = &btrfs_dir_ro_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
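
The placeholder inode that stands in for an empty (deleted) subvolume directory switches from libfs's simple_dir_inode_operations to btrfs's own read-only directory ops, keeping lookups inside btrfs. A read-only dir op table of this kind is small; assuming the usual shape of btrfs_dir_ro_inode_operations in inode.c:

    static const struct inode_operations btrfs_dir_ro_inode_operations = {
            .lookup		= btrfs_lookup,
            .permission	= btrfs_permission,
    };
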
static int btrfs_dentry_delete(const struct dentry *dentry)
{
struct btrfs_root *root;
+ struct inode *inode = dentry->d_inode;
- if (!dentry->d_inode && !IS_ROOT(dentry))
- dentry = dentry->d_parent;
+ if (!inode && !IS_ROOT(dentry))
+ inode = dentry->d_parent->d_inode;
- if (dentry->d_inode) {
- root = BTRFS_I(dentry->d_inode)->root;
+ if (inode) {
+ root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
+
+ if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ return 1;
}
return 0;
}
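
btrfs_dentry_delete() now also refuses to cache dentries pointing at the empty-subvolume placeholder inode, in addition to dentries of dead roots; returning 1 tells the VFS to unhash the dentry once its last reference is dropped. The hook is wired in through the dentry_operations table, roughly:

    /* sketch: d_delete is consulted on the final dput() */
    static const struct dentry_operations btrfs_dentry_operations = {
            .d_delete	= btrfs_dentry_delete,
    };
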
struct btrfs_path *path;
struct list_head ins_list;
struct list_head del_list;
- struct qstr q;
int ret;
struct extent_buffer *leaf;
int slot;
while (di_cur < di_total) {
struct btrfs_key location;
- struct dentry *tmp;
if (verify_dir_item(root, leaf, di))
break;
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
- q.name = name_ptr;
- q.len = name_len;
- q.hash = full_name_hash(q.name, q.len);
- tmp = d_lookup(filp->f_dentry, &q);
- if (!tmp) {
- struct btrfs_key *newkey;
-
- newkey = kzalloc(sizeof(struct btrfs_key),
- GFP_NOFS);
- if (!newkey)
- goto no_dentry;
- tmp = d_alloc(filp->f_dentry, &q);
- if (!tmp) {
- kfree(newkey);
- dput(tmp);
- goto no_dentry;
- }
- memcpy(newkey, &location,
- sizeof(struct btrfs_key));
- tmp->d_fsdata = newkey;
- tmp->d_flags |= DCACHE_NEED_LOOKUP;
- d_rehash(tmp);
- dput(tmp);
- } else {
- dput(tmp);
- }
- no_dentry:
+
/* is this a reference to our own snapshot? If so
- * skip it
+ * skip it.
+ *
+ * In contrast to old kernels, we insert the snapshot's
+ * dir item and dir index after it has been created, so
+ * we won't find a reference to our own snapshot. We
+ * still keep the following code for backward
+ * compatibility.
*/
if (location.type == BTRFS_ROOT_ITEM_KEY &&
location.objectid == root->root_key.objectid) {
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
if (ret) {
- char *kaddr = kmap_atomic(page, KM_USER0);
+ char *kaddr = kmap_atomic(page);
unsigned long copy_size = min_t(u64,
PAGE_CACHE_SIZE - pg_offset,
max_size - extent_offset);
memset(kaddr + pg_offset, 0, copy_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
kfree(tmp);
return 0;
unsigned long flags;
local_irq_save(flags);
- kaddr = kmap_atomic(page, KM_IRQ0);
+ kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
csum, bvec->bv_len);
btrfs_csum_final(csum, (char *)&csum);
- kunmap_atomic(kaddr, KM_IRQ0);
+ kunmap_atomic(kaddr);
local_irq_restore(flags);
flush_dcache_page(bvec->bv_page);
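
The KM_IRQ0 call sites in the checksumming end_io path convert the same way; with stack-based kmaps there is no interrupt-context slot to reserve, and one form serves everywhere. This path keeps its explicit IRQ disabling around the mapped section:

    local_irq_save(flags);
    kaddr = kmap_atomic(page);		/* same call as process context */
    /* ... checksum kaddr + bvec->bv_offset ... */
    kunmap_atomic(kaddr);
    local_irq_restore(flags);
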
if (is_metadata) {
struct btrfs_header *h;
- mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
+ mapped_buffer = kmap_atomic(sblock->pagev[0].page);
h = (struct btrfs_header *)mapped_buffer;
if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
if (!have_csum)
return;
- mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
+ mapped_buffer = kmap_atomic(sblock->pagev[0].page);
}
for (page_num = 0;;) {
crc = btrfs_csum_data(root, mapped_buffer, crc,
PAGE_SIZE);
- kunmap_atomic(mapped_buffer, KM_USER0);
+ kunmap_atomic(mapped_buffer);
page_num++;
if (page_num >= sblock->page_count)
break;
BUG_ON(!sblock->pagev[page_num].page);
- mapped_buffer = kmap_atomic(sblock->pagev[page_num].page,
- KM_USER0);
+ mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
}
btrfs_csum_final(crc, calculated_csum);
on_disk_csum = sblock->pagev[0].csum;
page = sblock->pagev[0].page;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
len = sdev->sectorsize;
index = 0;
u64 l = min_t(u64, len, PAGE_SIZE);
crc = btrfs_csum_data(root, buffer, crc, l);
- kunmap_atomic(buffer, KM_USER0);
+ kunmap_atomic(buffer);
len -= l;
if (len == 0)
break;
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index].page);
page = sblock->pagev[index].page;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
}
btrfs_csum_final(crc, csum);
if (memcmp(csum, on_disk_csum, sdev->csum_size))
fail = 1;
- if (fail) {
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.csum_errors;
- spin_unlock(&sdev->stat_lock);
- }
-
return fail;
}
BUG_ON(sblock->page_count < 1);
page = sblock->pagev[0].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
h = (struct btrfs_header *)mapped_buffer;
memcpy(on_disk_csum, h->csum, sdev->csum_size);
u64 l = min_t(u64, len, mapped_size);
crc = btrfs_csum_data(root, p, crc, l);
- kunmap_atomic(mapped_buffer, KM_USER0);
+ kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
break;
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index].page);
page = sblock->pagev[index].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}
if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
++crc_fail;
- if (crc_fail || fail) {
- spin_lock(&sdev->stat_lock);
- if (crc_fail)
- ++sdev->stat.csum_errors;
- if (fail)
- ++sdev->stat.verify_errors;
- spin_unlock(&sdev->stat_lock);
- }
-
return fail || crc_fail;
}
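
The scrub checksum helpers stop bumping sdev->stat themselves and merely report failure, leaving the stat_lock accounting to their caller; that way the helpers can be reused to re-verify blocks without inflating the counters. The caller-side shape this implies (a sketch, assuming scrub_checksum() aggregates the results):

    fail = scrub_checksum_data(sdev, ...);	/* 0 on success */
    if (fail) {
            spin_lock(&sdev->stat_lock);
            ++sdev->stat.csum_errors;
            spin_unlock(&sdev->stat_lock);
    }
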
BUG_ON(sblock->page_count < 1);
page = sblock->pagev[0].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
s = (struct btrfs_super_block *)mapped_buffer;
memcpy(on_disk_csum, s->csum, sdev->csum_size);
u64 l = min_t(u64, len, mapped_size);
crc = btrfs_csum_data(root, p, crc, l);
- kunmap_atomic(mapped_buffer, KM_USER0);
+ kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
break;
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index].page);
page = sblock->pagev[index].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}
void *data, int silent)
{
struct inode *inode;
- struct dentry *root_dentry;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_key key;
int err;
goto fail_close;
}
- root_dentry = d_alloc_root(inode);
- if (!root_dentry) {
- iput(inode);
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root) {
err = -ENOMEM;
goto fail_close;
}
- sb->s_root = root_dentry;
-
save_mount_options(sb, data);
cleancache_init_fs(sb);
sb->s_flags |= MS_ACTIVE;
return 0;
}
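
d_make_root() replaces the d_alloc_root()-plus-iput() dance. Unlike d_alloc_root(), it consumes the inode reference even on failure, so the error path must not drop it again; the removed iput() above is not just redundant but would now be a double put. Conversion pattern:

    sb->s_root = d_make_root(inode);	/* consumes 'inode' either way */
    if (!sb->s_root) {
            err = -ENOMEM;		/* inode already released */
            goto fail_close;
    }
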
- btrfs_start_delalloc_inodes(root, 0);
btrfs_wait_ordered_extents(root, 0, 0);
trans = btrfs_start_transaction(root, 0);
if (ret)
goto restore;
} else {
- if (fs_info->fs_devices->rw_devices == 0)
+ if (fs_info->fs_devices->rw_devices == 0) {
ret = -EACCES;
goto restore;
+ }
- if (btrfs_super_log_root(fs_info->super_copy) != 0)
+ if (btrfs_super_log_root(fs_info->super_copy) != 0) {
ret = -EINVAL;
goto restore;
+ }
ret = btrfs_cleanup_fs_roots(fs_info);
if (ret)
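
The added braces in the remount path fix a misleading-indentation bug: without them only the assignment was conditional, and the goto ran unconditionally, so the checks gated nothing. Reduced to its shape:

    /* before: 'goto restore' always runs, whatever the condition */
    if (fs_info->fs_devices->rw_devices == 0)
            ret = -EACCES;
            goto restore;

    /* after: bail out only when there really is no writable device */
    if (fs_info->fs_devices->rw_devices == 0) {
            ret = -EACCES;
            goto restore;
    }
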