/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/slab.h>
#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
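
/*
 * Two delayed items are "continuous" when both are dir index items of the
 * same directory and their index offsets are adjacent, so they can be
 * batched into a single leaf operation.
 */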
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
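
/*
 * Look up the delayed node for this inode and take a reference on it,
 * checking the cached pointer in the btrfs inode first and falling back
 * to the per-root radix tree.  Returns NULL if the inode has no delayed
 * node or the node is already being released.
 */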
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
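
/*
 * Drop one reference on the delayed node, requeueing it (or dequeueing
 * it) depending on whether it still has pending items.  The node is
 * deleted from the radix tree and freed once the last reference is gone.
 */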
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		/* not found */
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		/* not found */
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
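
/*
 * Insert a delayed item into the insertion or deletion rb-tree of its
 * delayed node, keyed by the item key.  Returns -EEXIST if an item with
 * the same key is already queued.
 */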
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
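
/*
 * Account one completed delayed item and wake up any task waiting in
 * btrfs_balance_delayed_items() once enough items have been processed.
 */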
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}
static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
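
/*
 * Migrate the metadata reservation for one leaf operation from the
 * transaction's block reserve to the global delayed_block_rsv, so the
 * space stays reserved until the delayed item is actually written out.
 */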
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved);
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
	if (ret < 0)
		return ret;
	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
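
/*
 * Delete a run of delayed items whose keys match consecutive items in
 * the leaf the path points to, using a single btrfs_del_items() call.
 */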
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
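
/*
 * Copy the in-memory inode item into the inode item in the fs tree and,
 * if requested, delete the single inode ref left for an unlinked inode.
 * Caller must hold delayed_node->mutex.
 */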
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
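
/*
 * Worker callback: keep committing prepared delayed nodes until the
 * backlog drops below BTRFS_DELAYED_BACKGROUND / 2 or the requested
 * number of nodes has been handled.
 */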
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
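
/*
 * Throttle the growth of the delayed-item backlog: kick background
 * writeback once BTRFS_DELAYED_BACKGROUND items are queued, and when
 * BTRFS_DELAYED_WRITEBACK is reached, wait until a batch has been
 * processed before returning.
 */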
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
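
/*
 * If a dir index insertion for this key is still queued, release it
 * instead of queueing a deletion item; the two cancel out.  Returns 1
 * when no matching insertion item was found.
 */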
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
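
/*
 * Copy the VFS inode state into the stack inode item that will later be
 * written back by __btrfs_update_delayed_inode().
 */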
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   We also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
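
/*
 * Throw away all pending work of a delayed node: release every queued
 * insertion/deletion item, the delayed iref and the dirty inode item
 * along with their metadata reservations.  Used on error and umount paths.
 */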
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			refcount_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}