// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
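
/*
 * Compare two delayed ref nodes with the same bytenr: order by type first,
 * then by the type specific comparison above, and finally, when @check_seq
 * is set, break ties on the sequence number.
 */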
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}
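
/*
 * Insert a delayed ref node into a head's ref tree, ordered by comp_refs().
 * Returns the existing node when an equal entry is already present (so the
 * caller can merge into it), or NULL when the node was inserted.
 */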
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		int return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first_cached(&dr->href_root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
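
/*
 * Lock the mutex of a delayed ref head while holding delayed_refs->lock.
 * If the trylock fails we drop the spinlock to sleep on the mutex, so on
 * waking we must re-check that the head was not run and removed from the
 * rbtree in the meantime; returns 0 on success and -EAGAIN when the caller
 * must retry.
 */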
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
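
/*
 * Unlink a single ref from its head: erase it from the head's rbtree and
 * the ref_add_list, drop its reference and update the bookkeeping counters.
 * Caller must hold head->lock.
 */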
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
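
/*
 * Fold @ref together with the refs that follow it in the head's tree and
 * compare equal to it (ignoring seq).  An entry whose ref_mod reaches zero
 * is dropped; merging stops at the first ref still protected by the tree
 * mod log (seq >= @seq).  Returns true when the caller must restart its
 * scan of the head.
 */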
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
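
/*
 * Merge all mergeable refs of a head.  The lowest sequence number still in
 * use by a tree mod log user is taken as a cut-off so that refs which a
 * backref walk may still need are left untouched.
 */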
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}
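
/*
 * Return 1 when @seq is still protected by an active tree mod log user and
 * processing of the corresponding delayed refs must be held back.
 */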
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
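
/*
 * Select the next delayed ref head to process, starting at
 * run_delayed_start and wrapping around at most once.  Heads that are
 * already being processed are skipped; the returned head is marked as
 * processing and run_delayed_start is advanced past it.
 */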
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(delayed_refs, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(delayed_refs, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else {
			mod = -ref->ref_mod;
		}
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}
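
/*
 * Initialize the fields of a ref head (and the optional qgroup record that
 * travels with it) before insertion.  The head's ref_mod starts at +1, -1
 * or 0 depending on the action being recorded.
 */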
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(qrecord && head_ref->qgroup_ref_root
			&& head_ref->qgroup_reserved
			&& existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (head_ref->is_data && head_ref->ref_mod < 0)
			delayed_refs->pending_csums += head_ref->num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:	Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:	Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated,
 *		this can be either one of the well-known metadata trees or
 *		the subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent.
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	int ret;
	u8 ref_type;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int ret;
	u8 ref_type;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}
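
/*
 * Queue a head-only delayed ref that carries an extent_op, used to record
 * a pending update to the flags or key stored in an extent item.
 */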
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(delayed_refs, bytenr, 0);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
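
/*
 * Create the slab caches used by the delayed ref machinery; on failure any
 * caches created so far are torn down again by btrfs_delayed_ref_exit().
 */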
int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}