// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"
#include "block-group.h"
#include "space-info.h"
/*
 * magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};
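/*
 * A rough sketch (not verbatim) of how these stages are driven by the replay
 * entry point (btrfs_recover_log_trees, later in this file): the log root
 * tree is first walked with stage LOG_WALK_PIN_ONLY, then every per-subvolume
 * log tree is walked once per replay stage, roughly:
 *
 *	wc.stage = LOG_WALK_REPLAY_INODES;
 * again:
 *	for each log tree found in the log root tree
 *		walk_log_tree(trans, log, &wc);
 *	if (wc.stage < LOG_WALK_REPLAY_ALL) {
 *		wc.stage++;
 *		goto again;
 *	}
 */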
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
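/*
 * In rough terms (a sketch, not the exact call chain), an fsync serviced by
 * the tree log then looks like:
 *
 *	start_log_trans()	join or create the per-subvolume log tree
 *	btrfs_log_inode()	copy the inode's changed items into the log
 *	btrfs_sync_log()	write the log tree and the log root tree
 *
 * instead of committing the whole transaction.
 */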
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
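/*
 * Sketch of a typical pin/end pairing as used by callers outside this file
 * (e.g. the rename path), which must keep a running log transaction from
 * committing while they make related changes:
 *
 *	btrfs_pin_log_trans(root);
 *	... modify trees in ways the log must not miss ...
 *	btrfs_end_log_trans(root);	wakes up anyone waiting to sync
 */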
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
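/*
 * Illustrative example of the size fixup described above: if the destination
 * tree already holds the key with a 160 byte item while the log's copy is
 * 120 bytes, the existing item is truncated to 120 bytes before the raw
 * copy; had the destination item been only 100 bytes, it would instead be
 * extended by 20 bytes.
 */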
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;

		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token, dst_eb);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
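/*
 * Illustrative example (made-up numbers): replaying the log item
 *
 *	key (257 EXTENT_DATA 0)
 *		extent data disk byte 12845056 nr 4096
 *
 * drops any file extents of inode 257 overlapping [0, 4096), re-inserts the
 * item, then either adds a backref (extent already in the extent tree) or
 * allocates it as a logged extent, and finally replays its checksums.
 */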
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to lookup the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;

				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
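/*
 * Illustrative example: if inode 258 has hard links "a" and "b" in the same
 * directory, both names can live in a single INODE_REF item.  While replay
 * processes the reference for "a", it must not unlink "b" from the subvolume
 * when the log also records "b".
 */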
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists). For any name that is not in the inode reference item from the log
 * tree, do a proper unlink of that name (that is, remove its entry from the
 * inode reference item and both dir index keys).
 */
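/*
 * Illustrative example: if the subvolume's inode reference item holds the
 * names "a" and "b" but the log's item for the same key only holds "a", then
 * "b" was removed before the fsync and must be properly unlinked here before
 * the log item overwrites the subvolume one.
 */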
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
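/*
 * Illustrative example: an inode that had 3 links when it was logged, but
 * for which replay could only re-create two names, ends up here with two
 * back references, so its link count is set to 2.  If no back references
 * remain at all, the count drops to zero and an orphan item makes sure the
 * inode gets cleaned up.
 */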
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
		   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
				       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	/*
	 * Check if the inode reference exists in the log for the given name,
	 * inode and parent inode
	 */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_REF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}

	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_EXTREF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
			     name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
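/*
 * Illustrative example: BTRFS_DIR_ITEM_KEY keys use the crc32c hash of the
 * name as the key offset, so two names whose hashes collide share one key
 * and a single item can carry several entries.  BTRFS_DIR_INDEX_KEY keys are
 * unique per entry, so those items always hold exactly one name.
 */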
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
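/*
 * Illustrative example: a dir log item for directory 257 covering offsets
 * [0, 99] says the log is authoritative for those index keys.  If the
 * subvolume directory holds index 50 for a name the log does not contain,
 * that name was deleted before the fsync and gets removed during replay.
 */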
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				  name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
					BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_root *log,
			      struct btrfs_path *path,
			      const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;

			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
2524 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2525 struct walk_control *wc, u64 gen, int level)
2528 struct btrfs_path *path;
2529 struct btrfs_root *root = wc->replay_dest;
2530 struct btrfs_key key;
2534 ret = btrfs_read_buffer(eb, gen, level, NULL);
2538 level = btrfs_header_level(eb);
2543 path = btrfs_alloc_path();
2547 nritems = btrfs_header_nritems(eb);
2548 for (i = 0; i < nritems; i++) {
2549 btrfs_item_key_to_cpu(eb, &key, i);
2551 /* inode keys are done during the first stage */
2552 if (key.type == BTRFS_INODE_ITEM_KEY &&
2553 wc->stage == LOG_WALK_REPLAY_INODES) {
2554 struct btrfs_inode_item *inode_item;
2557 inode_item = btrfs_item_ptr(eb, i,
2558 struct btrfs_inode_item);
2560 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2561 * and never got linked before the fsync, skip it, as
2562 * replaying it is pointless since it would be deleted
2563 * later. We skip logging tmpfiles, but it's always
2564 * possible we are replaying a log created with a kernel
2565 * that used to log tmpfiles.
2567 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2568 wc->ignore_cur_inode = true;
2571 wc->ignore_cur_inode = false;
2573 ret = replay_xattr_deletes(wc->trans, root, log,
2574 path, key.objectid);
2577 mode = btrfs_inode_mode(eb, inode_item);
2578 if (S_ISDIR(mode)) {
2579 ret = replay_dir_deletes(wc->trans,
2580 root, log, path, key.objectid, 0);
2584 ret = overwrite_item(wc->trans, root, path,
2590 * Before replaying extents, truncate the inode to its
2591 * size. We need to do it now and not after log replay
2592 * because before an fsync we can have prealloc extents
2593 * added beyond the inode's i_size. If we did it after,
2594 * through orphan cleanup for example, we would drop
2595 * those prealloc extents just after replaying them.
2597 if (S_ISREG(mode)) {
2598 struct inode *inode;
2601 inode = read_one_inode(root, key.objectid);
2606 from = ALIGN(i_size_read(inode),
2607 root->fs_info->sectorsize);
2608 ret = btrfs_drop_extents(wc->trans, root, inode,
2611 /* Update the inode's nbytes. */
2612 ret = btrfs_update_inode(wc->trans,
2620 ret = link_to_fixup_dir(wc->trans, root,
2621 path, key.objectid);
2626 if (wc->ignore_cur_inode)
2629 if (key.type == BTRFS_DIR_INDEX_KEY &&
2630 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2631 ret = replay_one_dir_item(wc->trans, root, path,
2637 if (wc->stage < LOG_WALK_REPLAY_ALL)
2640 /* these keys are simply copied */
2641 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2642 ret = overwrite_item(wc->trans, root, path,
2646 } else if (key.type == BTRFS_INODE_REF_KEY ||
2647 key.type == BTRFS_INODE_EXTREF_KEY) {
2648 ret = add_inode_ref(wc->trans, root, log, path,
2650 if (ret && ret != -ENOENT)
2653 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2654 ret = replay_one_extent(wc->trans, root, path,
2658 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2659 ret = replay_one_dir_item(wc->trans, root, path,
2665 btrfs_free_path(path);
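/*
 * Editorial summary of the dispatch above: during LOG_WALK_REPLAY_INODES
 * only BTRFS_INODE_ITEM_KEY entries are processed (plus the xattr and
 * directory deletion replay they trigger); during
 * LOG_WALK_REPLAY_DIR_INDEX only BTRFS_DIR_INDEX_KEY entries are
 * replayed; all remaining types (xattrs, inode refs, file extents,
 * dir items) wait for LOG_WALK_REPLAY_ALL.
 */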
2670 * Correctly adjust the reserved bytes occupied by a log tree extent buffer
2672 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2674 struct btrfs_block_group *cache;
2676 cache = btrfs_lookup_block_group(fs_info, start);
2678 btrfs_err(fs_info, "unable to find block group for %llu", start);
2682 spin_lock(&cache->space_info->lock);
2683 spin_lock(&cache->lock);
2684 cache->reserved -= fs_info->nodesize;
2685 cache->space_info->bytes_reserved -= fs_info->nodesize;
2686 spin_unlock(&cache->lock);
2687 spin_unlock(&cache->space_info->lock);
2689 btrfs_put_block_group(cache);
2692 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2693 struct btrfs_root *root,
2694 struct btrfs_path *path, int *level,
2695 struct walk_control *wc)
2697 struct btrfs_fs_info *fs_info = root->fs_info;
2700 struct extent_buffer *next;
2701 struct extent_buffer *cur;
2705 while (*level > 0) {
2706 struct btrfs_key first_key;
2708 cur = path->nodes[*level];
2710 WARN_ON(btrfs_header_level(cur) != *level);
2712 if (path->slots[*level] >=
2713 btrfs_header_nritems(cur))
2716 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2717 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2718 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2719 blocksize = fs_info->nodesize;
2721 next = btrfs_find_create_tree_block(fs_info, bytenr);
2723 return PTR_ERR(next);
2726 ret = wc->process_func(root, next, wc, ptr_gen,
2729 free_extent_buffer(next);
2733 path->slots[*level]++;
2735 ret = btrfs_read_buffer(next, ptr_gen,
2736 *level - 1, &first_key);
2738 free_extent_buffer(next);
2743 btrfs_tree_lock(next);
2744 btrfs_set_lock_blocking_write(next);
2745 btrfs_clean_tree_block(next);
2746 btrfs_wait_tree_block_writeback(next);
2747 btrfs_tree_unlock(next);
2748 ret = btrfs_pin_reserved_extent(trans,
2751 free_extent_buffer(next);
2755 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2756 clear_extent_buffer_dirty(next);
2757 unaccount_log_buffer(fs_info, bytenr);
2760 free_extent_buffer(next);
2763 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2765 free_extent_buffer(next);
2769 if (path->nodes[*level-1])
2770 free_extent_buffer(path->nodes[*level-1]);
2771 path->nodes[*level-1] = next;
2772 *level = btrfs_header_level(next);
2773 path->slots[*level] = 0;
2776 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2782 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2783 struct btrfs_root *root,
2784 struct btrfs_path *path, int *level,
2785 struct walk_control *wc)
2787 struct btrfs_fs_info *fs_info = root->fs_info;
2792 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2793 slot = path->slots[i];
2794 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2797 WARN_ON(*level == 0);
2800 ret = wc->process_func(root, path->nodes[*level], wc,
2801 btrfs_header_generation(path->nodes[*level]),
2807 struct extent_buffer *next;
2809 next = path->nodes[*level];
2812 btrfs_tree_lock(next);
2813 btrfs_set_lock_blocking_write(next);
2814 btrfs_clean_tree_block(next);
2815 btrfs_wait_tree_block_writeback(next);
2816 btrfs_tree_unlock(next);
2817 ret = btrfs_pin_reserved_extent(trans,
2818 path->nodes[*level]->start,
2819 path->nodes[*level]->len);
2823 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2824 clear_extent_buffer_dirty(next);
2826 unaccount_log_buffer(fs_info,
2827 path->nodes[*level]->start);
2830 free_extent_buffer(path->nodes[*level]);
2831 path->nodes[*level] = NULL;
2839 * drop the reference count on the tree rooted at 'log'. This traverses
2840 * the tree freeing any blocks that have a ref count of zero after being
2841 * decremented.
2843 static int walk_log_tree(struct btrfs_trans_handle *trans,
2844 struct btrfs_root *log, struct walk_control *wc)
2846 struct btrfs_fs_info *fs_info = log->fs_info;
2850 struct btrfs_path *path;
2853 path = btrfs_alloc_path();
2857 level = btrfs_header_level(log->node);
2859 path->nodes[level] = log->node;
2860 atomic_inc(&log->node->refs);
2861 path->slots[level] = 0;
2864 wret = walk_down_log_tree(trans, log, path, &level, wc);
2872 wret = walk_up_log_tree(trans, log, path, &level, wc);
2881 /* was the root node processed? if not, catch it here */
2882 if (path->nodes[orig_level]) {
2883 ret = wc->process_func(log, path->nodes[orig_level], wc,
2884 btrfs_header_generation(path->nodes[orig_level]),
2889 struct extent_buffer *next;
2891 next = path->nodes[orig_level];
2894 btrfs_tree_lock(next);
2895 btrfs_set_lock_blocking_write(next);
2896 btrfs_clean_tree_block(next);
2897 btrfs_wait_tree_block_writeback(next);
2898 btrfs_tree_unlock(next);
2899 ret = btrfs_pin_reserved_extent(trans,
2900 next->start, next->len);
2904 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2905 clear_extent_buffer_dirty(next);
2906 unaccount_log_buffer(fs_info, next->start);
2912 btrfs_free_path(path);
2917 * helper function to update the item for a given subvolume's log root
2918 * in the tree of log roots
2920 static int update_log_root(struct btrfs_trans_handle *trans,
2921 struct btrfs_root *log,
2922 struct btrfs_root_item *root_item)
2924 struct btrfs_fs_info *fs_info = log->fs_info;
2927 if (log->log_transid == 1) {
2928 /* insert root item on the first sync */
2929 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2930 &log->root_key, root_item);
2932 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2933 &log->root_key, root_item);
2938 static void wait_log_commit(struct btrfs_root *root, int transid)
2941 int index = transid % 2;
2944 * we only allow two pending log transactions at a time,
2945 * so we know that if ours is more than 2 older than the
2946 * current transaction, we're done
2949 prepare_to_wait(&root->log_commit_wait[index],
2950 &wait, TASK_UNINTERRUPTIBLE);
2952 if (!(root->log_transid_committed < transid &&
2953 atomic_read(&root->log_commit[index])))
2956 mutex_unlock(&root->log_mutex);
2958 mutex_lock(&root->log_mutex);
2960 finish_wait(&root->log_commit_wait[index], &wait);
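/*
 * Editorial sketch of the two-slot scheme used above: log transid N uses
 * slot N % 2, so e.g. transid 6 commits in slot 0 while transid 7 can
 * already be filling slot 1. A waiter for transid 6 sleeps on
 * log_commit_wait[0] until either log_transid_committed reaches 6 or
 * log_commit[0] is cleared.
 */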
2963 static void wait_for_writer(struct btrfs_root *root)
2968 prepare_to_wait(&root->log_writer_wait, &wait,
2969 TASK_UNINTERRUPTIBLE);
2970 if (!atomic_read(&root->log_writers))
2973 mutex_unlock(&root->log_mutex);
2975 mutex_lock(&root->log_mutex);
2977 finish_wait(&root->log_writer_wait, &wait);
2980 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2981 struct btrfs_log_ctx *ctx)
2986 mutex_lock(&root->log_mutex);
2987 list_del_init(&ctx->list);
2988 mutex_unlock(&root->log_mutex);
2992 * Invoked with the log mutex held, or from a context where it is certain
2993 * that no other task can access the list.
2995 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2996 int index, int error)
2998 struct btrfs_log_ctx *ctx;
2999 struct btrfs_log_ctx *safe;
3001 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3002 list_del_init(&ctx->list);
3003 ctx->log_ret = error;
3006 INIT_LIST_HEAD(&root->log_ctxs[index]);
3010 * btrfs_sync_log sends a given tree log down to the disk and
3011 * updates the super blocks to record it. When this call is done,
3012 * you know that any inodes previously logged are safely on disk only
3013 * if it returns 0.
3015 * Any other return value means you need to call btrfs_commit_transaction.
3016 * Some of the edge cases for fsyncing directories that have had unlinks
3017 * or renames done in the past mean that sometimes the only safe
3018 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3019 * that has happened.
3021 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3022 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3028 struct btrfs_fs_info *fs_info = root->fs_info;
3029 struct btrfs_root *log = root->log_root;
3030 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3031 struct btrfs_root_item new_root_item;
3032 int log_transid = 0;
3033 struct btrfs_log_ctx root_log_ctx;
3034 struct blk_plug plug;
3036 mutex_lock(&root->log_mutex);
3037 log_transid = ctx->log_transid;
3038 if (root->log_transid_committed >= log_transid) {
3039 mutex_unlock(&root->log_mutex);
3040 return ctx->log_ret;
3043 index1 = log_transid % 2;
3044 if (atomic_read(&root->log_commit[index1])) {
3045 wait_log_commit(root, log_transid);
3046 mutex_unlock(&root->log_mutex);
3047 return ctx->log_ret;
3049 ASSERT(log_transid == root->log_transid);
3050 atomic_set(&root->log_commit[index1], 1);
3052 /* wait for previous tree log sync to complete */
3053 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3054 wait_log_commit(root, log_transid - 1);
3057 int batch = atomic_read(&root->log_batch);
3058 /* when we're on an ssd, just kick the log commit out */
3059 if (!btrfs_test_opt(fs_info, SSD) &&
3060 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3061 mutex_unlock(&root->log_mutex);
3062 schedule_timeout_uninterruptible(1);
3063 mutex_lock(&root->log_mutex);
3065 wait_for_writer(root);
3066 if (batch == atomic_read(&root->log_batch))
3070 /* bail out if we need to do a full commit */
3071 if (btrfs_need_log_full_commit(trans)) {
3073 mutex_unlock(&root->log_mutex);
3077 if (log_transid % 2 == 0)
3078 mark = EXTENT_DIRTY;
3082 /* we start IO on all the marked extents here, but we don't actually
3083 * wait for them until later.
3085 blk_start_plug(&plug);
3086 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3088 blk_finish_plug(&plug);
3089 btrfs_abort_transaction(trans, ret);
3090 btrfs_set_log_full_commit(trans);
3091 mutex_unlock(&root->log_mutex);
3096 * We _must_ update under the root->log_mutex in order to make sure we
3097 * have a consistent view of the log root we are trying to commit at
3098 * this moment.
3100 * We _must_ copy this into a local copy, because we are not holding the
3101 * log_root_tree->log_mutex yet. This is important because when we
3102 * commit the log_root_tree we must have a consistent view of the
3103 * log_root_tree when we update the super block to point at the
3104 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3105 * with the commit and possibly point at the new block which we may not
3106 * have written out.
3108 btrfs_set_root_node(&log->root_item, log->node);
3109 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3111 root->log_transid++;
3112 log->log_transid = root->log_transid;
3113 root->log_start_pid = 0;
3115 * IO has been started, blocks of the log tree have the WRITTEN flag set
3116 * in their headers. New modifications of the log will be written to
3117 * new positions, so it's safe to allow log writers to go in.
3119 mutex_unlock(&root->log_mutex);
3121 btrfs_init_log_ctx(&root_log_ctx, NULL);
3123 mutex_lock(&log_root_tree->log_mutex);
3124 atomic_inc(&log_root_tree->log_batch);
3125 atomic_inc(&log_root_tree->log_writers);
3127 index2 = log_root_tree->log_transid % 2;
3128 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3129 root_log_ctx.log_transid = log_root_tree->log_transid;
3131 mutex_unlock(&log_root_tree->log_mutex);
3133 mutex_lock(&log_root_tree->log_mutex);
3136 * Now we are safe to update the log_root_tree because we're under the
3137 * log_mutex, and we're a current writer so we're holding the commit
3138 * open until we drop the log_mutex.
3140 ret = update_log_root(trans, log, &new_root_item);
3142 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3143 /* atomic_dec_and_test implies a barrier */
3144 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3148 if (!list_empty(&root_log_ctx.list))
3149 list_del_init(&root_log_ctx.list);
3151 blk_finish_plug(&plug);
3152 btrfs_set_log_full_commit(trans);
3154 if (ret != -ENOSPC) {
3155 btrfs_abort_transaction(trans, ret);
3156 mutex_unlock(&log_root_tree->log_mutex);
3159 btrfs_wait_tree_log_extents(log, mark);
3160 mutex_unlock(&log_root_tree->log_mutex);
3165 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3166 blk_finish_plug(&plug);
3167 list_del_init(&root_log_ctx.list);
3168 mutex_unlock(&log_root_tree->log_mutex);
3169 ret = root_log_ctx.log_ret;
3173 index2 = root_log_ctx.log_transid % 2;
3174 if (atomic_read(&log_root_tree->log_commit[index2])) {
3175 blk_finish_plug(&plug);
3176 ret = btrfs_wait_tree_log_extents(log, mark);
3177 wait_log_commit(log_root_tree,
3178 root_log_ctx.log_transid);
3179 mutex_unlock(&log_root_tree->log_mutex);
3181 ret = root_log_ctx.log_ret;
3184 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3185 atomic_set(&log_root_tree->log_commit[index2], 1);
3187 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3188 wait_log_commit(log_root_tree,
3189 root_log_ctx.log_transid - 1);
3192 wait_for_writer(log_root_tree);
3195 * now that we've moved on to the tree of log tree roots,
3196 * check the full commit flag again
3198 if (btrfs_need_log_full_commit(trans)) {
3199 blk_finish_plug(&plug);
3200 btrfs_wait_tree_log_extents(log, mark);
3201 mutex_unlock(&log_root_tree->log_mutex);
3203 goto out_wake_log_root;
3206 ret = btrfs_write_marked_extents(fs_info,
3207 &log_root_tree->dirty_log_pages,
3208 EXTENT_DIRTY | EXTENT_NEW);
3209 blk_finish_plug(&plug);
3211 btrfs_set_log_full_commit(trans);
3212 btrfs_abort_transaction(trans, ret);
3213 mutex_unlock(&log_root_tree->log_mutex);
3214 goto out_wake_log_root;
3216 ret = btrfs_wait_tree_log_extents(log, mark);
3218 ret = btrfs_wait_tree_log_extents(log_root_tree,
3219 EXTENT_NEW | EXTENT_DIRTY);
3221 btrfs_set_log_full_commit(trans);
3222 mutex_unlock(&log_root_tree->log_mutex);
3223 goto out_wake_log_root;
3226 btrfs_set_super_log_root(fs_info->super_for_commit,
3227 log_root_tree->node->start);
3228 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3229 btrfs_header_level(log_root_tree->node));
3231 log_root_tree->log_transid++;
3232 mutex_unlock(&log_root_tree->log_mutex);
3235 * Nobody else is going to jump in and write the ctree
3236 * super here because the log_commit atomic below is protecting
3237 * us. We must be called with a transaction handle pinning
3238 * the running transaction open, so a full commit can't hop
3239 * in and cause problems either.
3241 ret = write_all_supers(fs_info, 1);
3243 btrfs_set_log_full_commit(trans);
3244 btrfs_abort_transaction(trans, ret);
3245 goto out_wake_log_root;
3248 mutex_lock(&root->log_mutex);
3249 if (root->last_log_commit < log_transid)
3250 root->last_log_commit = log_transid;
3251 mutex_unlock(&root->log_mutex);
3254 mutex_lock(&log_root_tree->log_mutex);
3255 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3257 log_root_tree->log_transid_committed++;
3258 atomic_set(&log_root_tree->log_commit[index2], 0);
3259 mutex_unlock(&log_root_tree->log_mutex);
3262 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3263 * all the updates above are seen by the woken threads. It might not be
3264 * necessary, but proving that seems to be hard.
3266 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3268 mutex_lock(&root->log_mutex);
3269 btrfs_remove_all_log_ctxs(root, index1, ret);
3270 root->log_transid_committed++;
3271 atomic_set(&root->log_commit[index1], 0);
3272 mutex_unlock(&root->log_mutex);
3275 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3276 * all the updates above are seen by the woken threads. It might not be
3277 * necessary, but proving that seems to be hard.
3279 cond_wake_up(&root->log_commit_wait[index1]);
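/*
 * Editorial summary of the commit sequence above (assuming the elided
 * error paths mirror the visible ones):
 *
 *  1) write the dirty extents of this subvolume's log tree
 *  2) record the log root in the log_root_tree (update_log_root)
 *  3) write the dirty extents of the log_root_tree itself
 *  4) point super_for_commit at the log_root_tree and write the supers
 *  5) wake anyone blocked in wait_log_commit() for either tree
 *
 * Any failure along the way sets the full-commit flag so that the
 * caller falls back to btrfs_commit_transaction().
 */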
3283 static void free_log_tree(struct btrfs_trans_handle *trans,
3284 struct btrfs_root *log)
3287 struct walk_control wc = {
3289 .process_func = process_one_buffer
3292 ret = walk_log_tree(trans, log, &wc);
3295 btrfs_abort_transaction(trans, ret);
3297 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3300 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3301 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3302 btrfs_put_root(log);
3306 * free all the extents used by the tree log. This should be called
3307 * at commit time of the full transaction
3309 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3311 if (root->log_root) {
3312 free_log_tree(trans, root->log_root);
3313 root->log_root = NULL;
3318 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3319 struct btrfs_fs_info *fs_info)
3321 if (fs_info->log_root_tree) {
3322 free_log_tree(trans, fs_info->log_root_tree);
3323 fs_info->log_root_tree = NULL;
3329 * Check if an inode was logged in the current transaction. We can't always rely
3330 * on an inode's logged_trans value, because it's an in-memory only field and
3331 * therefore not persisted. This means that its value is lost if the inode gets
3332 * evicted and loaded again from disk (in which case it has a value of 0, and
3333 * certainly it is smaller than any possible transaction ID). When that happens,
3334 * the full_sync flag is set in the inode's runtime flags, so in that case we
3335 * assume eviction happened and ignore the logged_trans value, assuming the
3336 * worst case, that the inode was logged before in the current transaction.
3338 static bool inode_logged(struct btrfs_trans_handle *trans,
3339 struct btrfs_inode *inode)
3341 if (inode->logged_trans == trans->transid)
3344 if (inode->last_trans == trans->transid &&
3345 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3346 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3353 * If both a file and directory are logged, and unlinks or renames are
3354 * mixed in, we have a few interesting corners:
3356 * create file X in dir Y
3357 * link file X to X.link in dir Y
3359 * unlink file X but leave X.link
3362 * After a crash we would expect only X.link to exist. But file X
3363 * didn't get fsync'd again so the log has back refs for X and X.link.
3365 * We solve this by removing directory entries and inode backrefs from the
3366 * log when a file that was logged in the current transaction is
3367 * unlinked. Any later fsync will include the updated log entries, and
3368 * we'll be able to reconstruct the proper directory items from backrefs.
3370 * This optimization allows us to avoid relogging the entire inode
3371 * or the entire directory.
3373 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3374 struct btrfs_root *root,
3375 const char *name, int name_len,
3376 struct btrfs_inode *dir, u64 index)
3378 struct btrfs_root *log;
3379 struct btrfs_dir_item *di;
3380 struct btrfs_path *path;
3384 u64 dir_ino = btrfs_ino(dir);
3386 if (!inode_logged(trans, dir))
3389 ret = join_running_log_trans(root);
3393 mutex_lock(&dir->log_mutex);
3395 log = root->log_root;
3396 path = btrfs_alloc_path();
3402 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3403 name, name_len, -1);
3409 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3410 bytes_del += name_len;
3416 btrfs_release_path(path);
3417 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3418 index, name, name_len, -1);
3424 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3425 bytes_del += name_len;
3432 /* update the directory size in the log to reflect the names
3433 * we have removed
3436 struct btrfs_key key;
3438 key.objectid = dir_ino;
3440 key.type = BTRFS_INODE_ITEM_KEY;
3441 btrfs_release_path(path);
3443 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3449 struct btrfs_inode_item *item;
3452 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3453 struct btrfs_inode_item);
3454 i_size = btrfs_inode_size(path->nodes[0], item);
3455 if (i_size > bytes_del)
3456 i_size -= bytes_del;
3459 btrfs_set_inode_size(path->nodes[0], item, i_size);
3460 btrfs_mark_buffer_dirty(path->nodes[0]);
3463 btrfs_release_path(path);
3466 btrfs_free_path(path);
3468 mutex_unlock(&dir->log_mutex);
3469 if (ret == -ENOSPC) {
3470 btrfs_set_log_full_commit(trans);
3473 btrfs_abort_transaction(trans, ret);
3475 btrfs_end_log_trans(root);
3480 /* see comments for btrfs_del_dir_entries_in_log */
3481 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3482 struct btrfs_root *root,
3483 const char *name, int name_len,
3484 struct btrfs_inode *inode, u64 dirid)
3486 struct btrfs_root *log;
3490 if (!inode_logged(trans, inode))
3493 ret = join_running_log_trans(root);
3496 log = root->log_root;
3497 mutex_lock(&inode->log_mutex);
3499 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3501 mutex_unlock(&inode->log_mutex);
3502 if (ret == -ENOSPC) {
3503 btrfs_set_log_full_commit(trans);
3505 } else if (ret < 0 && ret != -ENOENT)
3506 btrfs_abort_transaction(trans, ret);
3507 btrfs_end_log_trans(root);
3513 * creates a range item in the log for 'dirid'. first_offset and
3514 * last_offset tell us which parts of the key space the log should
3515 * be considered authoritative for.
3517 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3518 struct btrfs_root *log,
3519 struct btrfs_path *path,
3520 int key_type, u64 dirid,
3521 u64 first_offset, u64 last_offset)
3524 struct btrfs_key key;
3525 struct btrfs_dir_log_item *item;
3527 key.objectid = dirid;
3528 key.offset = first_offset;
3529 if (key_type == BTRFS_DIR_ITEM_KEY)
3530 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3532 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3533 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3537 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3538 struct btrfs_dir_log_item);
3539 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3540 btrfs_mark_buffer_dirty(path->nodes[0]);
3541 btrfs_release_path(path);
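/*
 * Editorial example: a call with key_type == BTRFS_DIR_ITEM_KEY,
 * dirid 257, first_offset 100 and last_offset 200 inserts the key
 * (257, BTRFS_DIR_LOG_ITEM_KEY, 100) whose btrfs_dir_log_item payload
 * stores dir_log_end = 200. At replay time find_dir_range() reads this
 * back as "the log is authoritative for offsets 100..200 of directory
 * 257".
 */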
3546 * log all the items included in the current transaction for a given
3547 * directory. This also creates the range items in the log tree required
3548 * to replay anything deleted before the fsync
3550 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3551 struct btrfs_root *root, struct btrfs_inode *inode,
3552 struct btrfs_path *path,
3553 struct btrfs_path *dst_path, int key_type,
3554 struct btrfs_log_ctx *ctx,
3555 u64 min_offset, u64 *last_offset_ret)
3557 struct btrfs_key min_key;
3558 struct btrfs_root *log = root->log_root;
3559 struct extent_buffer *src;
3564 u64 first_offset = min_offset;
3565 u64 last_offset = (u64)-1;
3566 u64 ino = btrfs_ino(inode);
3568 log = root->log_root;
3570 min_key.objectid = ino;
3571 min_key.type = key_type;
3572 min_key.offset = min_offset;
3574 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3577 * we didn't find anything from this transaction, see if there
3578 * is anything at all
3580 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3581 min_key.objectid = ino;
3582 min_key.type = key_type;
3583 min_key.offset = (u64)-1;
3584 btrfs_release_path(path);
3585 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3587 btrfs_release_path(path);
3590 ret = btrfs_previous_item(root, path, ino, key_type);
3592 /* if ret == 0 there are items for this type,
3593 * create a range to tell us the last key of this type.
3594 * otherwise, there are no items in this directory after
3595 * *min_offset, and we create a range to indicate that.
3598 struct btrfs_key tmp;
3599 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3601 if (key_type == tmp.type)
3602 first_offset = max(min_offset, tmp.offset) + 1;
3607 /* go backward to find any previous key */
3608 ret = btrfs_previous_item(root, path, ino, key_type);
3610 struct btrfs_key tmp;
3611 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3612 if (key_type == tmp.type) {
3613 first_offset = tmp.offset;
3614 ret = overwrite_item(trans, log, dst_path,
3615 path->nodes[0], path->slots[0],
3623 btrfs_release_path(path);
3626 * Find the first key from this transaction again. See the note for
3627 * log_new_dir_dentries, if we're logging a directory recursively we
3628 * won't be holding its i_mutex, which means we can modify the directory
3629 * while we're logging it. If we remove an entry between our first
3630 * search and this search we'll not find the key again and can just
3631 * bail out safely.
3633 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3638 * we have a block from this transaction, log every item in it
3639 * from our directory
3642 struct btrfs_key tmp;
3643 src = path->nodes[0];
3644 nritems = btrfs_header_nritems(src);
3645 for (i = path->slots[0]; i < nritems; i++) {
3646 struct btrfs_dir_item *di;
3648 btrfs_item_key_to_cpu(src, &min_key, i);
3650 if (min_key.objectid != ino || min_key.type != key_type)
3652 ret = overwrite_item(trans, log, dst_path, src, i,
3660 * We must make sure that when we log a directory entry,
3661 * the corresponding inode, after log replay, has a
3662 * matching link count. For example:
3668 * xfs_io -c "fsync" mydir
3670 * <mount fs and log replay>
3672 * Would result in a fsync log that when replayed, our
3673 * file inode would have a link count of 1, but we get
3674 * two directory entries pointing to the same inode.
3675 * After removing one of the names, it would not be
3676 * possible to remove the other name, which always
3677 * resulted in stale file handle errors, and it would not
3678 * be possible to rmdir the parent directory, since
3679 * its i_size could never decrement to the value
3680 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3682 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3683 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3685 (btrfs_dir_transid(src, di) == trans->transid ||
3686 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3687 tmp.type != BTRFS_ROOT_ITEM_KEY)
3688 ctx->log_new_dentries = true;
3690 path->slots[0] = nritems;
3693 * look ahead to the next item and see if it is also
3694 * from this directory and from this transaction
3696 ret = btrfs_next_leaf(root, path);
3699 last_offset = (u64)-1;
3704 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3705 if (tmp.objectid != ino || tmp.type != key_type) {
3706 last_offset = (u64)-1;
3709 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3710 ret = overwrite_item(trans, log, dst_path,
3711 path->nodes[0], path->slots[0],
3716 last_offset = tmp.offset;
3721 btrfs_release_path(path);
3722 btrfs_release_path(dst_path);
3725 *last_offset_ret = last_offset;
3727 * insert the log range keys to indicate where the log
3728 * is valid
3730 ret = insert_dir_log_key(trans, log, path, key_type,
3731 ino, first_offset, last_offset);
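/*
 * Editorial example of how callers walk the key space: a first call
 * with min_offset 0 might log some items and return
 * *last_offset_ret == 1000; log_directory_changes() below then calls
 * again with min_offset 1001, and so on until last_offset is (u64)-1.
 * The dir log keys inserted above end up covering the entire offset
 * range, which is what lets replay_dir_deletes() interpret gaps as
 * deletions.
 */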
3739 * logging directories is very similar to logging inodes. We find all the items
3740 * from the current transaction and write them to the log.
3742 * The recovery code scans the directory in the subvolume, and if it finds a
3743 * key in the range logged that is not present in the log tree, then it means
3744 * that dir entry was unlinked during the transaction.
3746 * In order for that scan to work, we must include one key smaller than
3747 * the smallest logged by this transaction and one key larger than the largest
3748 * key logged by this transaction.
3750 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3751 struct btrfs_root *root, struct btrfs_inode *inode,
3752 struct btrfs_path *path,
3753 struct btrfs_path *dst_path,
3754 struct btrfs_log_ctx *ctx)
3759 int key_type = BTRFS_DIR_ITEM_KEY;
3765 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3766 ctx, min_key, &max_key);
3769 if (max_key == (u64)-1)
3771 min_key = max_key + 1;
3774 if (key_type == BTRFS_DIR_ITEM_KEY) {
3775 key_type = BTRFS_DIR_INDEX_KEY;
3782 * a helper function to drop items from the log before we relog an
3783 * inode. max_key_type indicates the highest item type to remove.
3784 * This cannot be run for file data extents because it does not
3785 * free the extents they point to.
3787 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3788 struct btrfs_root *log,
3789 struct btrfs_path *path,
3790 u64 objectid, int max_key_type)
3793 struct btrfs_key key;
3794 struct btrfs_key found_key;
3797 key.objectid = objectid;
3798 key.type = max_key_type;
3799 key.offset = (u64)-1;
3802 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3803 BUG_ON(ret == 0); /* Logic error */
3807 if (path->slots[0] == 0)
3811 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3814 if (found_key.objectid != objectid)
3817 found_key.offset = 0;
3819 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3824 ret = btrfs_del_items(trans, log, path, start_slot,
3825 path->slots[0] - start_slot + 1);
3827 * If start slot isn't 0 then we don't need to re-search, we've
3828 * found the last guy with the objectid in this tree.
3830 if (ret || start_slot != 0)
3832 btrfs_release_path(path);
3834 btrfs_release_path(path);
3840 static void fill_inode_item(struct btrfs_trans_handle *trans,
3841 struct extent_buffer *leaf,
3842 struct btrfs_inode_item *item,
3843 struct inode *inode, int log_inode_only,
3846 struct btrfs_map_token token;
3848 btrfs_init_map_token(&token, leaf);
3850 if (log_inode_only) {
3851 /* set the generation to zero so the recovery code
3852 * can tell the difference between a logging done
3853 * just to say 'this inode exists' and a logging done
3854 * to say 'update this inode with these values'
3856 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3857 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3859 btrfs_set_token_inode_generation(leaf, item,
3860 BTRFS_I(inode)->generation,
3862 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3865 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3866 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3867 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3868 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3870 btrfs_set_token_timespec_sec(leaf, &item->atime,
3871 inode->i_atime.tv_sec, &token);
3872 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3873 inode->i_atime.tv_nsec, &token);
3875 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3876 inode->i_mtime.tv_sec, &token);
3877 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3878 inode->i_mtime.tv_nsec, &token);
3880 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3881 inode->i_ctime.tv_sec, &token);
3882 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3883 inode->i_ctime.tv_nsec, &token);
3885 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3888 btrfs_set_token_inode_sequence(leaf, item,
3889 inode_peek_iversion(inode), &token);
3890 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3891 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3892 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3893 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3896 static int log_inode_item(struct btrfs_trans_handle *trans,
3897 struct btrfs_root *log, struct btrfs_path *path,
3898 struct btrfs_inode *inode)
3900 struct btrfs_inode_item *inode_item;
3903 ret = btrfs_insert_empty_item(trans, log, path,
3904 &inode->location, sizeof(*inode_item));
3905 if (ret && ret != -EEXIST)
3907 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3908 struct btrfs_inode_item);
3909 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3911 btrfs_release_path(path);
3915 static int log_csums(struct btrfs_trans_handle *trans,
3916 struct btrfs_root *log_root,
3917 struct btrfs_ordered_sum *sums)
3922 * Due to extent cloning, we might have logged a csum item that covers a
3923 * subrange of a cloned extent, and later we can end up logging a csum
3924 * item for a larger subrange of the same extent or the entire range.
3925 * This would leave csum items in the log tree that cover the same range
3926 * and break the searches for checksums in the log tree, resulting in
3927 * some checksums missing in the fs/subvolume tree. So just delete (or
3928 * trim and adjust) any existing csum items in the log for this range.
3930 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
3934 return btrfs_csum_file_blocks(trans, log_root, sums);
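/*
 * Editorial scenario for the comment above (assuming typical reflink
 * usage): clone a 4K subrange of an extent into another file and fsync
 * it, logging a csum item for just that subrange; later in the same
 * transaction fsync the source file, logging a csum item for the whole
 * extent. Without the btrfs_del_csums() call the log would hold two
 * overlapping csum items for the same bytenr range, and replay-time
 * lookups could pick the narrower one and miss checksums.
 */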
3937 static noinline int copy_items(struct btrfs_trans_handle *trans,
3938 struct btrfs_inode *inode,
3939 struct btrfs_path *dst_path,
3940 struct btrfs_path *src_path,
3941 int start_slot, int nr, int inode_only,
3944 struct btrfs_fs_info *fs_info = trans->fs_info;
3945 unsigned long src_offset;
3946 unsigned long dst_offset;
3947 struct btrfs_root *log = inode->root->log_root;
3948 struct btrfs_file_extent_item *extent;
3949 struct btrfs_inode_item *inode_item;
3950 struct extent_buffer *src = src_path->nodes[0];
3952 struct btrfs_key *ins_keys;
3956 struct list_head ordered_sums;
3957 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3959 INIT_LIST_HEAD(&ordered_sums);
3961 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3962 nr * sizeof(u32), GFP_NOFS);
3966 ins_sizes = (u32 *)ins_data;
3967 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3969 for (i = 0; i < nr; i++) {
3970 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3971 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3973 ret = btrfs_insert_empty_items(trans, log, dst_path,
3974 ins_keys, ins_sizes, nr);
3980 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3981 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3982 dst_path->slots[0]);
3984 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3986 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3987 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3989 struct btrfs_inode_item);
3990 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3992 inode_only == LOG_INODE_EXISTS,
3995 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3996 src_offset, ins_sizes[i]);
3999 /* take a reference on file data extents so that truncates
4000 * or deletes of this inode don't have to relog the inode
4001 * again
4003 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4006 extent = btrfs_item_ptr(src, start_slot + i,
4007 struct btrfs_file_extent_item);
4009 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4012 found_type = btrfs_file_extent_type(src, extent);
4013 if (found_type == BTRFS_FILE_EXTENT_REG) {
4015 ds = btrfs_file_extent_disk_bytenr(src,
4017 /* ds == 0 is a hole */
4021 dl = btrfs_file_extent_disk_num_bytes(src,
4023 cs = btrfs_file_extent_offset(src, extent);
4024 cl = btrfs_file_extent_num_bytes(src,
4026 if (btrfs_file_extent_compression(src,
4032 ret = btrfs_lookup_csums_range(
4034 ds + cs, ds + cs + cl - 1,
4037 btrfs_release_path(dst_path);
4045 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4046 btrfs_release_path(dst_path);
4050 * we have to do this after the loop above so that we don't modify
4051 * the log tree while we are still copying items into it.
4054 while (!list_empty(&ordered_sums)) {
4055 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4056 struct btrfs_ordered_sum,
4059 ret = log_csums(trans, log, sums);
4060 list_del(&sums->list);
4067 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4069 struct extent_map *em1, *em2;
4071 em1 = list_entry(a, struct extent_map, list);
4072 em2 = list_entry(b, struct extent_map, list);
4074 if (em1->start < em2->start)
4076 else if (em1->start > em2->start)
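/*
 * Editorial note: this comparator is passed to list_sort() in
 * btrfs_log_changed_extents() below, so extents are logged in
 * ascending file-offset order.
 */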
4081 static int log_extent_csums(struct btrfs_trans_handle *trans,
4082 struct btrfs_inode *inode,
4083 struct btrfs_root *log_root,
4084 const struct extent_map *em)
4088 LIST_HEAD(ordered_sums);
4091 if (inode->flags & BTRFS_INODE_NODATASUM ||
4092 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4093 em->block_start == EXTENT_MAP_HOLE)
4096 /* If we're compressed we have to save the entire range of csums. */
4097 if (em->compress_type) {
4099 csum_len = max(em->block_len, em->orig_block_len);
4101 csum_offset = em->mod_start - em->start;
4102 csum_len = em->mod_len;
4105 /* block start is already adjusted for the file extent offset. */
4106 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4107 em->block_start + csum_offset,
4108 em->block_start + csum_offset +
4109 csum_len - 1, &ordered_sums, 0);
4113 while (!list_empty(&ordered_sums)) {
4114 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4115 struct btrfs_ordered_sum,
4118 ret = log_csums(trans, log_root, sums);
4119 list_del(&sums->list);
4126 static int log_one_extent(struct btrfs_trans_handle *trans,
4127 struct btrfs_inode *inode, struct btrfs_root *root,
4128 const struct extent_map *em,
4129 struct btrfs_path *path,
4130 struct btrfs_log_ctx *ctx)
4132 struct btrfs_root *log = root->log_root;
4133 struct btrfs_file_extent_item *fi;
4134 struct extent_buffer *leaf;
4135 struct btrfs_map_token token;
4136 struct btrfs_key key;
4137 u64 extent_offset = em->start - em->orig_start;
4140 int extent_inserted = 0;
4142 ret = log_extent_csums(trans, inode, log, em);
4146 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4147 em->start + em->len, NULL, 0, 1,
4148 sizeof(*fi), &extent_inserted);
4152 if (!extent_inserted) {
4153 key.objectid = btrfs_ino(inode);
4154 key.type = BTRFS_EXTENT_DATA_KEY;
4155 key.offset = em->start;
4157 ret = btrfs_insert_empty_item(trans, log, path, &key,
4162 leaf = path->nodes[0];
4163 btrfs_init_map_token(&token, leaf);
4164 fi = btrfs_item_ptr(leaf, path->slots[0],
4165 struct btrfs_file_extent_item);
4167 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4169 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4170 btrfs_set_token_file_extent_type(leaf, fi,
4171 BTRFS_FILE_EXTENT_PREALLOC,
4174 btrfs_set_token_file_extent_type(leaf, fi,
4175 BTRFS_FILE_EXTENT_REG,
4178 block_len = max(em->block_len, em->orig_block_len);
4179 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4180 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4183 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4185 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4186 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4188 extent_offset, &token);
4189 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4192 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4193 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4197 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4198 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4199 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4200 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4202 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4203 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4204 btrfs_mark_buffer_dirty(leaf);
4206 btrfs_release_path(path);
4212 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4213 * lose them after doing a fast fsync and replaying the log. We scan the
4214 * subvolume's root instead of iterating the inode's extent map tree because
4215 * otherwise we can log incorrect extent items based on extent map conversion.
4216 * That can happen due to the fact that extent maps are merged when they
4217 * are not in the extent map tree's list of modified extents.
4219 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4220 struct btrfs_inode *inode,
4221 struct btrfs_path *path)
4223 struct btrfs_root *root = inode->root;
4224 struct btrfs_key key;
4225 const u64 i_size = i_size_read(&inode->vfs_inode);
4226 const u64 ino = btrfs_ino(inode);
4227 struct btrfs_path *dst_path = NULL;
4228 bool dropped_extents = false;
4233 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4237 key.type = BTRFS_EXTENT_DATA_KEY;
4238 key.offset = i_size;
4239 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4244 struct extent_buffer *leaf = path->nodes[0];
4245 int slot = path->slots[0];
4247 if (slot >= btrfs_header_nritems(leaf)) {
4249 ret = copy_items(trans, inode, dst_path, path,
4250 start_slot, ins_nr, 1, 0);
4255 ret = btrfs_next_leaf(root, path);
4265 btrfs_item_key_to_cpu(leaf, &key, slot);
4266 if (key.objectid > ino)
4268 if (WARN_ON_ONCE(key.objectid < ino) ||
4269 key.type < BTRFS_EXTENT_DATA_KEY ||
4270 key.offset < i_size) {
4274 if (!dropped_extents) {
4276 * Avoid logging extent items logged in past fsync calls,
4277 * which would lead to duplicate keys in the log tree.
4280 ret = btrfs_truncate_inode_items(trans,
4284 BTRFS_EXTENT_DATA_KEY);
4285 } while (ret == -EAGAIN);
4288 dropped_extents = true;
4295 dst_path = btrfs_alloc_path();
4303 ret = copy_items(trans, inode, dst_path, path,
4304 start_slot, ins_nr, 1, 0);
4309 btrfs_release_path(path);
4310 btrfs_free_path(dst_path);
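/*
 * Editorial example of the case handled above:
 *
 *   $ xfs_io -f -c "falloc -k 0 1M" -c "pwrite 0 4K" -c "fsync" foo
 *
 * leaves i_size at 4K with prealloc extents out to 1M. A fast fsync
 * only iterates the modified extent maps, so without this explicit
 * scan of the subvolume tree the prealloc extents beyond i_size could
 * be absent from the log and lost after replay.
 */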
4314 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4315 struct btrfs_root *root,
4316 struct btrfs_inode *inode,
4317 struct btrfs_path *path,
4318 struct btrfs_log_ctx *ctx,
4322 struct extent_map *em, *n;
4323 struct list_head extents;
4324 struct extent_map_tree *tree = &inode->extent_tree;
4329 INIT_LIST_HEAD(&extents);
4331 write_lock(&tree->lock);
4332 test_gen = root->fs_info->last_trans_committed;
4334 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4336 * Skip extents outside our logging range. It's important to do
4337 * it for correctness because if we don't ignore them, we may
4338 * log them before their ordered extent completes, and therefore
4339 * we could log them without logging their respective checksums
4340 * (the checksum items are added to the csum tree at the very
4341 * end of btrfs_finish_ordered_io()). Also leave such extents
4342 * outside of our range in the list, since we may have another
4343 * ranged fsync in the near future that needs them. If an extent
4344 * outside our range corresponds to a hole, log it to avoid
4345 * leaving gaps between extents (fsck will complain when we are
4346 * not using the NO_HOLES feature).
4348 if ((em->start > end || em->start + em->len <= start) &&
4349 em->block_start != EXTENT_MAP_HOLE)
4352 list_del_init(&em->list);
4354 * Just an arbitrary number, this can be really CPU intensive
4355 * once we start getting a lot of extents, and really once we
4356 * have a bunch of extents we just want to commit since it will
4357 * be faster.
4359 if (++num > 32768) {
4360 list_del_init(&tree->modified_extents);
4365 if (em->generation <= test_gen)
4368 /* We log prealloc extents beyond eof later. */
4369 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4370 em->start >= i_size_read(&inode->vfs_inode))
4373 /* Need a ref to keep it from getting evicted from cache */
4374 refcount_inc(&em->refs);
4375 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4376 list_add_tail(&em->list, &extents);
4380 list_sort(NULL, &extents, extent_cmp);
4382 while (!list_empty(&extents)) {
4383 em = list_entry(extents.next, struct extent_map, list);
4385 list_del_init(&em->list);
4388 * If we had an error we just need to delete everybody from our
4389 * private list.
4392 clear_em_logging(tree, em);
4393 free_extent_map(em);
4397 write_unlock(&tree->lock);
4399 ret = log_one_extent(trans, inode, root, em, path, ctx);
4400 write_lock(&tree->lock);
4401 clear_em_logging(tree, em);
4402 free_extent_map(em);
4404 WARN_ON(!list_empty(&extents));
4405 write_unlock(&tree->lock);
4407 btrfs_release_path(path);
4409 ret = btrfs_log_prealloc_extents(trans, inode, path);
4414 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4415 struct btrfs_path *path, u64 *size_ret)
4417 struct btrfs_key key;
4420 key.objectid = btrfs_ino(inode);
4421 key.type = BTRFS_INODE_ITEM_KEY;
4424 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4427 } else if (ret > 0) {
4430 struct btrfs_inode_item *item;
4432 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4433 struct btrfs_inode_item);
4434 *size_ret = btrfs_inode_size(path->nodes[0], item);
4436 * If the in-memory inode's i_size is smaller than the inode
4437 * size stored in the btree, return the inode's i_size, so
4438 * that we get a correct inode size after replaying the log
4439 * when before a power failure we had a shrinking truncate
4440 * followed by addition of a new name (rename / new hard link).
4441 * Otherwise return the inode size from the btree, to avoid
4442 * data loss when replaying a log due to previously doing a
4443 * write that expands the inode's size and logging a new name
4444 * immediately after.
4446 if (*size_ret > inode->vfs_inode.i_size)
4447 *size_ret = inode->vfs_inode.i_size;
4450 btrfs_release_path(path);
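/*
 * Editorial example for the clamping above: if a 1M file is truncated
 * to 4K and then renamed (or gains a new hard link) before a power
 * failure, the log may still carry the old 1M size from an earlier
 * logging while the in-memory i_size is already 4K. Returning the
 * smaller of the two makes replay end up with 4K instead of
 * resurrecting the pre-truncate size.
 */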
4455 * At the moment we always log all xattrs. This is to figure out at log replay
4456 * time which xattrs must have their deletion replayed. If an xattr is missing
4457 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4458 * because if an xattr is deleted, the inode is fsynced and a power failure
4459 * happens, causing the log to be replayed the next time the fs is mounted,
4460 * we want the xattr to not exist anymore (same behaviour as other filesystems
4461 * with a journal, ext3/4, xfs, f2fs, etc).
4463 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4464 struct btrfs_root *root,
4465 struct btrfs_inode *inode,
4466 struct btrfs_path *path,
4467 struct btrfs_path *dst_path)
4470 struct btrfs_key key;
4471 const u64 ino = btrfs_ino(inode);
4476 key.type = BTRFS_XATTR_ITEM_KEY;
4479 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4484 int slot = path->slots[0];
4485 struct extent_buffer *leaf = path->nodes[0];
4486 int nritems = btrfs_header_nritems(leaf);
4488 if (slot >= nritems) {
4490 ret = copy_items(trans, inode, dst_path, path,
4491 start_slot, ins_nr, 1, 0);
4496 ret = btrfs_next_leaf(root, path);
4504 btrfs_item_key_to_cpu(leaf, &key, slot);
4505 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4515 ret = copy_items(trans, inode, dst_path, path,
4516 start_slot, ins_nr, 1, 0);
4525 * When using the NO_HOLES feature if we punched a hole that causes the
4526 * deletion of entire leafs or all the extent items of the first leaf (the one
4527 * that contains the inode item and references) we may end up not processing
4528 * any extents, because there are no leafs with a generation matching the
4529 * current transaction that have extent items for our inode. So we need to find
4530 * if any holes exist and then log them. We also need to log holes after any
4531 * truncate operation that changes the inode's size.
4533 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4534 struct btrfs_root *root,
4535 struct btrfs_inode *inode,
4536 struct btrfs_path *path)
4538 struct btrfs_fs_info *fs_info = root->fs_info;
4539 struct btrfs_key key;
4540 const u64 ino = btrfs_ino(inode);
4541 const u64 i_size = i_size_read(&inode->vfs_inode);
4542 u64 prev_extent_end = 0;
4545 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4549 key.type = BTRFS_EXTENT_DATA_KEY;
4552 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4557 struct extent_buffer *leaf = path->nodes[0];
4559 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4560 ret = btrfs_next_leaf(root, path);
4567 leaf = path->nodes[0];
4570 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4571 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4574 /* We have a hole, log it. */
4575 if (prev_extent_end < key.offset) {
4576 const u64 hole_len = key.offset - prev_extent_end;
4579 * Release the path to avoid deadlocks with other code
4580 * paths that search the root while holding locks on
4581 * leafs from the log root.
4583 btrfs_release_path(path);
4584 ret = btrfs_insert_file_extent(trans, root->log_root,
4585 ino, prev_extent_end, 0,
4586 0, hole_len, 0, hole_len,
4592 * Search for the same key again in the root. Since it's
4593 * an extent item and we are holding the inode lock, the
4594 * key must still exist. If it doesn't just emit warning
4595 * and return an error to fall back to a transaction
4598 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4601 if (WARN_ON(ret > 0))
4603 leaf = path->nodes[0];
4606 prev_extent_end = btrfs_file_extent_end(path);
4611 if (prev_extent_end < i_size) {
4614 btrfs_release_path(path);
4615 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4616 ret = btrfs_insert_file_extent(trans, root->log_root,
4617 ino, prev_extent_end, 0, 0,
4618 hole_len, 0, hole_len,
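/*
 * Editorial example of the hole logging above (NO_HOLES enabled):
 *
 *   $ xfs_io -f -c "pwrite 0 256K" -c "fsync" foo
 *   $ xfs_io -c "fpunch 64K 128K" -c "fsync" foo
 *   <power failure>
 *
 * With NO_HOLES no extent item describes the punched range, so unless
 * the log carries an explicit file extent item with a zero disk_bytenr
 * for [64K, 192K), replay could leave the old extent items in place
 * and bring back the punched data.
 */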
4628 * When we are logging a new inode X, check if it doesn't have a reference that
4629 * matches the reference from some other inode Y created in a past transaction
4630 * and that was renamed in the current transaction. If we don't do this, then at
4631 * log replay time we can lose inode Y (and all its files if it's a directory):
4634 * echo "hello world" > /mnt/x/foobar
4637 * mkdir /mnt/x # or touch /mnt/x
4638 * xfs_io -c fsync /mnt/x
4640 * mount fs, trigger log replay
4642 * After the log replay procedure, we would lose the first directory and all its
4643 * files (file foobar).
4644 * For the case where inode Y is not a directory we simply end up losing it:
4646 * echo "123" > /mnt/foo
4648 * mv /mnt/foo /mnt/bar
4649 * echo "abc" > /mnt/foo
4650 * xfs_io -c fsync /mnt/foo
4653 * We also need this for cases where a snapshot entry is replaced by some other
4654 * entry (file or directory) otherwise we end up with an unreplayable log due to
4655 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4656 * if it were a regular entry:
4659 * btrfs subvolume snapshot /mnt /mnt/x/snap
4660 * btrfs subvolume delete /mnt/x/snap
4663 * fsync /mnt/x or fsync some new file inside it
4666 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4667 * the same transaction.
4669 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4671 const struct btrfs_key *key,
4672 struct btrfs_inode *inode,
4673 u64 *other_ino, u64 *other_parent)
4676 struct btrfs_path *search_path;
4679 u32 item_size = btrfs_item_size_nr(eb, slot);
4681 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4683 search_path = btrfs_alloc_path();
4686 search_path->search_commit_root = 1;
4687 search_path->skip_locking = 1;
4689 while (cur_offset < item_size) {
4693 unsigned long name_ptr;
4694 struct btrfs_dir_item *di;
4696 if (key->type == BTRFS_INODE_REF_KEY) {
4697 struct btrfs_inode_ref *iref;
4699 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4700 parent = key->offset;
4701 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4702 name_ptr = (unsigned long)(iref + 1);
4703 this_len = sizeof(*iref) + this_name_len;
4705 struct btrfs_inode_extref *extref;
4707 extref = (struct btrfs_inode_extref *)(ptr +
4709 parent = btrfs_inode_extref_parent(eb, extref);
4710 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4711 name_ptr = (unsigned long)&extref->name;
4712 this_len = sizeof(*extref) + this_name_len;
4715 if (this_name_len > name_len) {
4718 new_name = krealloc(name, this_name_len, GFP_NOFS);
4723 name_len = this_name_len;
4727 read_extent_buffer(eb, name, name_ptr, this_name_len);
4728 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4729 parent, name, this_name_len, 0);
4730 if (di && !IS_ERR(di)) {
4731 struct btrfs_key di_key;
4733 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4735 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4736 if (di_key.objectid != key->objectid) {
4738 *other_ino = di_key.objectid;
4739 *other_parent = parent;
4747 } else if (IS_ERR(di)) {
4751 btrfs_release_path(search_path);
4753 cur_offset += this_len;
4757 btrfs_free_path(search_path);
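/*
 * Editorial note on the return convention, inferred from the callers
 * below: 0 means no conflicting name was found, 1 means a directory
 * entry with the same name points at a different inode (reported
 * through *other_ino and *other_parent), and a negative value is an
 * error.
 */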
4762 struct btrfs_ino_list {
4765 struct list_head list;
4768 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4769 struct btrfs_root *root,
4770 struct btrfs_path *path,
4771 struct btrfs_log_ctx *ctx,
4772 u64 ino, u64 parent)
4774 struct btrfs_ino_list *ino_elem;
4775 LIST_HEAD(inode_list);
4778 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4781 ino_elem->ino = ino;
4782 ino_elem->parent = parent;
4783 list_add_tail(&ino_elem->list, &inode_list);
4785 while (!list_empty(&inode_list)) {
4786 struct btrfs_fs_info *fs_info = root->fs_info;
4787 struct btrfs_key key;
4788 struct inode *inode;
4790 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4792 ino = ino_elem->ino;
4793 parent = ino_elem->parent;
4794 list_del(&ino_elem->list);
4799 btrfs_release_path(path);
4802 key.type = BTRFS_INODE_ITEM_KEY;
4804 inode = btrfs_iget(fs_info->sb, &key, root);
4806 * If the other inode that had a conflicting dir entry was
4807 * deleted in the current transaction, we need to log its parent
4808 * directory.
4810 if (IS_ERR(inode)) {
4811 ret = PTR_ERR(inode);
4812 if (ret == -ENOENT) {
4813 key.objectid = parent;
4814 inode = btrfs_iget(fs_info->sb, &key, root);
4815 if (IS_ERR(inode)) {
4816 ret = PTR_ERR(inode);
4818 ret = btrfs_log_inode(trans, root,
4820 LOG_OTHER_INODE_ALL,
4822 btrfs_add_delayed_iput(inode);
4828 * If the inode was already logged skip it - otherwise we can
4829 * hit an infinite loop. Example:
4831 * From the commit root (previous transaction) we have the
4832 * following:
4834 * inode 257 a directory
4835 * inode 258 with references "zz" and "zz_link" on inode 257
4836 * inode 259 with reference "a" on inode 257
4838 * And in the current (uncommitted) transaction we have:
4840 * inode 257 a directory, unchanged
4841 * inode 258 with references "a" and "a2" on inode 257
4842 * inode 259 with reference "zz_link" on inode 257
4843 * inode 261 with reference "zz" on inode 257
4845 * When logging inode 261 the following infinite loop could
4846 * happen if we don't skip already logged inodes:
4848 * - we detect inode 258 as a conflicting inode, with inode 261
4849 * on reference "zz", and log it;
4851 * - we detect inode 259 as a conflicting inode, with inode 258
4852 * on reference "a", and log it;
4854 * - we detect inode 258 as a conflicting inode, with inode 259
4855 * on reference "zz_link", and log it - again! After this we
4856 * repeat the above steps forever.
4858 spin_lock(&BTRFS_I(inode)->lock);
4860 * Check the inode's logged_trans only instead of
4861 * btrfs_inode_in_log(). This is because the last_log_commit of
4862 * the inode is not updated when we only log that it exists and
4863 * it has the full sync bit set (see btrfs_log_inode()).
4865 if (BTRFS_I(inode)->logged_trans == trans->transid) {
4866 spin_unlock(&BTRFS_I(inode)->lock);
4867 btrfs_add_delayed_iput(inode);
4870 spin_unlock(&BTRFS_I(inode)->lock);
4872 * We are safe logging the other inode without acquiring its
4873 * lock as long as we log with the LOG_INODE_EXISTS mode. We
4874 * are safe against concurrent renames of the other inode as
4875 * well because during a rename we pin the log and update the
4876 * log with the new name before we unpin it.
4878 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
4879 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
4881 btrfs_add_delayed_iput(inode);
4886 key.type = BTRFS_INODE_REF_KEY;
4888 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4890 btrfs_add_delayed_iput(inode);
4895 struct extent_buffer *leaf = path->nodes[0];
4896 int slot = path->slots[0];
4898 u64 other_parent = 0;
4900 if (slot >= btrfs_header_nritems(leaf)) {
4901 ret = btrfs_next_leaf(root, path);
4904 } else if (ret > 0) {
4911 btrfs_item_key_to_cpu(leaf, &key, slot);
4912 if (key.objectid != ino ||
4913 (key.type != BTRFS_INODE_REF_KEY &&
4914 key.type != BTRFS_INODE_EXTREF_KEY)) {
4919 ret = btrfs_check_ref_name_override(leaf, slot, &key,
4920 BTRFS_I(inode), &other_ino,
4925 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4930 ino_elem->ino = other_ino;
4931 ino_elem->parent = other_parent;
4932 list_add_tail(&ino_elem->list, &inode_list);
4937 btrfs_add_delayed_iput(inode);
4943 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
4944 struct btrfs_inode *inode,
4945 struct btrfs_key *min_key,
4946 const struct btrfs_key *max_key,
4947 struct btrfs_path *path,
4948 struct btrfs_path *dst_path,
4949 const u64 logged_isize,
4950 const bool recursive_logging,
4951 const int inode_only,
4952 struct btrfs_log_ctx *ctx,
4953 bool *need_log_inode_item)
4955 struct btrfs_root *root = inode->root;
4956 int ins_start_slot = 0;
4961 ret = btrfs_search_forward(root, min_key, path, trans->transid);
4969 /* Note, ins_nr might be > 0 here, cleanup outside the loop */
4970 if (min_key->objectid != max_key->objectid)
4972 if (min_key->type > max_key->type)
4975 if (min_key->type == BTRFS_INODE_ITEM_KEY)
4976 *need_log_inode_item = false;
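		/*
		 * The inode item is copied to the log as part of this batch,
		 * so the caller does not need to log it separately
		 * afterwards.
		 */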
4978 if ((min_key->type == BTRFS_INODE_REF_KEY ||
4979 min_key->type == BTRFS_INODE_EXTREF_KEY) &&
4980 inode->generation == trans->transid &&
4981 !recursive_logging) {
4983 u64 other_parent = 0;
4985 ret = btrfs_check_ref_name_override(path->nodes[0],
4986 path->slots[0], min_key, inode,
4987 &other_ino, &other_parent);
4990 } else if (ret > 0 && ctx &&
4991 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
4996 ins_start_slot = path->slots[0];
4998 ret = copy_items(trans, inode, dst_path, path,
4999 ins_start_slot, ins_nr,
5000 inode_only, logged_isize);
5005 ret = log_conflicting_inodes(trans, root, path,
5006 ctx, other_ino, other_parent);
5009 btrfs_release_path(path);
5014 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5015 if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5018 ret = copy_items(trans, inode, dst_path, path,
5020 ins_nr, inode_only, logged_isize);
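		/*
		 * The batching below accumulates runs of contiguous leaf
		 * slots (ins_start_slot/ins_nr) so a whole run can be copied
		 * to the log tree with a single copy_items() call: extend the
		 * run if this slot immediately follows it, start a new run if
		 * none is open, otherwise flush the current run first.
		 */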
5027 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5030 } else if (!ins_nr) {
5031 ins_start_slot = path->slots[0];
5036 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5037 ins_nr, inode_only, logged_isize);
5041 ins_start_slot = path->slots[0];
5044 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5045 btrfs_item_key_to_cpu(path->nodes[0], min_key,
5050 ret = copy_items(trans, inode, dst_path, path,
5051 ins_start_slot, ins_nr, inode_only,
5057 btrfs_release_path(path);
5059 if (min_key->offset < (u64)-1) {
5061 } else if (min_key->type < max_key->type) {
5063 min_key->offset = 0;
5069 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5070 ins_nr, inode_only, logged_isize);
5075 /* log a single inode in the tree log.
5076 * At least one parent directory for this inode must exist in the tree
5077 * or be logged already.
5079 * Any items from this inode changed by the current transaction are copied
5080 * to the log tree. An extra reference is taken on any extents in this
5081 * file, allowing us to avoid a whole pile of corner cases around logging
5082 * blocks that have been removed from the tree.
5084 * See LOG_INODE_ALL and related defines for a description of what inode_only does.
5087 * This handles both files and directories.
5089 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5090 struct btrfs_root *root, struct btrfs_inode *inode,
5094 struct btrfs_log_ctx *ctx)
5096 struct btrfs_fs_info *fs_info = root->fs_info;
5097 struct btrfs_path *path;
5098 struct btrfs_path *dst_path;
5099 struct btrfs_key min_key;
5100 struct btrfs_key max_key;
5101 struct btrfs_root *log = root->log_root;
5104 bool fast_search = false;
5105 u64 ino = btrfs_ino(inode);
5106 struct extent_map_tree *em_tree = &inode->extent_tree;
5107 u64 logged_isize = 0;
5108 bool need_log_inode_item = true;
5109 bool xattrs_logged = false;
5110 bool recursive_logging = false;
5112 path = btrfs_alloc_path();
5115 dst_path = btrfs_alloc_path();
5117 btrfs_free_path(path);
5121 min_key.objectid = ino;
5122 min_key.type = BTRFS_INODE_ITEM_KEY;
5125 max_key.objectid = ino;
5128 /* today the code can only do partial logging of directories */
5129 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5130 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5131 &inode->runtime_flags) &&
5132 inode_only >= LOG_INODE_EXISTS))
5133 max_key.type = BTRFS_XATTR_ITEM_KEY;
5135 max_key.type = (u8)-1;
5136 max_key.offset = (u64)-1;
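	/*
	 * In the key space everything up to and including xattrs
	 * (BTRFS_XATTR_ITEM_KEY) describes the inode itself, while dir
	 * entries and file extent items sort after it. So the cap chosen
	 * above restricts the copy to the inode's own metadata.
	 */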
5139 * Only run delayed items if we are a dir or a new file.
5140 * Otherwise commit the delayed inode only, which is needed in
5141 * order for the log replay code to mark inodes for link count
5142 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5144 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5145 inode->generation > fs_info->last_trans_committed)
5146 ret = btrfs_commit_inode_delayed_items(trans, inode);
5148 ret = btrfs_commit_inode_delayed_inode(inode);
5151 btrfs_free_path(path);
5152 btrfs_free_path(dst_path);
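	/*
	 * LOG_OTHER_INODE{,_ALL} means we were called from
	 * log_conflicting_inodes() while the log_mutex of some other inode is
	 * already held, so take our log_mutex with a nested lock class to
	 * avoid false positive lockdep reports, and translate the mode to the
	 * equivalent regular one for the rest of this function.
	 */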
5156 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5157 recursive_logging = true;
5158 if (inode_only == LOG_OTHER_INODE)
5159 inode_only = LOG_INODE_EXISTS;
5161 inode_only = LOG_INODE_ALL;
5162 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5164 mutex_lock(&inode->log_mutex);
5168 * a brute force approach to making sure we get the most up-to-date
5169 * copies of everything.
5171 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5172 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5174 if (inode_only == LOG_INODE_EXISTS)
5175 max_key_type = BTRFS_XATTR_ITEM_KEY;
5176 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5178 if (inode_only == LOG_INODE_EXISTS) {
5180 * Make sure the new inode item we write to the log has
5181 * the same isize as the current one (if it exists).
5182 * This is necessary to prevent data loss after log
5183 * replay, and also to prevent doing a wrong expanding
5184 * truncate - e.g. create a file, write 4K into offset
5185 * 0, fsync, write 4K into offset 4096, add a hard link,
5186 * fsync some other file (to sync the log), power fail - if
5187 * we use the inode's current i_size, after log replay
5188 * we get an 8K file, with the last 4K extent as a hole
5189 * (zeroes), as if an expanding truncate happened,
5190 * instead of getting a file of only 4K.
5192 err = logged_inode_size(log, inode, path, &logged_isize);
5196 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5197 &inode->runtime_flags)) {
5198 if (inode_only == LOG_INODE_EXISTS) {
5199 max_key.type = BTRFS_XATTR_ITEM_KEY;
5200 ret = drop_objectid_items(trans, log, path, ino,
5203 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5204 &inode->runtime_flags);
5205 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5206 &inode->runtime_flags);
5208 ret = btrfs_truncate_inode_items(trans,
5209 log, &inode->vfs_inode, 0, 0);
5214 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5215 &inode->runtime_flags) ||
5216 inode_only == LOG_INODE_EXISTS) {
5217 if (inode_only == LOG_INODE_ALL)
5219 max_key.type = BTRFS_XATTR_ITEM_KEY;
5220 ret = drop_objectid_items(trans, log, path, ino,
5223 if (inode_only == LOG_INODE_ALL)
5234 err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5235 path, dst_path, logged_isize,
5236 recursive_logging, inode_only, ctx,
5237 &need_log_inode_item);
5241 btrfs_release_path(path);
5242 btrfs_release_path(dst_path);
5243 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5246 xattrs_logged = true;
5247 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5248 btrfs_release_path(path);
5249 btrfs_release_path(dst_path);
5250 err = btrfs_log_holes(trans, root, inode, path);
5255 btrfs_release_path(path);
5256 btrfs_release_path(dst_path);
5257 if (need_log_inode_item) {
5258 err = log_inode_item(trans, log, dst_path, inode);
5259 if (!err && !xattrs_logged) {
5260 err = btrfs_log_all_xattrs(trans, root, inode, path,
5262 btrfs_release_path(path);
5268 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5274 } else if (inode_only == LOG_INODE_ALL) {
5275 struct extent_map *em, *n;
5277 write_lock(&em_tree->lock);
5279 * We can't just remove every em if we're called for a ranged
5280 * fsync - that is, one that doesn't cover the whole possible
5281 * file range (0 to LLONG_MAX). This is because we can have
5282 * em's that fall outside the range we're logging and therefore
5283 * their ordered operations haven't completed yet
5284 * (btrfs_finish_ordered_io() not invoked yet). This means we
5285 * didn't get their respective file extent item in the fs/subvol
5286 * tree yet, and need to let the next fast fsync (one which
5287 * consults the list of modified extent maps) find the em so
5288 * that it logs a matching file extent item and waits for the
5289 * respective ordered operation to complete (if it's still running).
5292 * Removing every em outside the range we're logging would make
5293 * the next fast fsync not log their matching file extent items,
5294 * therefore making us lose data after a log replay.
5296 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5298 const u64 mod_end = em->mod_start + em->mod_len - 1;
5300 if (em->mod_start >= start && mod_end <= end)
5301 list_del_init(&em->list);
5303 write_unlock(&em_tree->lock);
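	/*
	 * Illustrative example of the above (hypothetical workload): write to
	 * the ranges [0, 256K) and [1M, 1M + 256K) of a file, then do a
	 * ranged fsync covering [0, 256K) only. The extent map for the second
	 * range must stay on the modified list so that a later fast fsync
	 * still logs its file extent item once its ordered IO completes.
	 */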
5306 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5307 ret = log_directory_changes(trans, root, inode, path, dst_path,
5316 * Don't update last_log_commit if we logged that an inode exists after
5317 * it was loaded to memory (full_sync bit set).
5318 * This is to prevent data loss when we do a write to the inode, then
5319 * the inode gets evicted after all delalloc was flushed, then we log
5320 * it exists (due to a rename for example) and then fsync it. This last
5321 * fsync would do nothing (not logging the extents previously written).
5323 spin_lock(&inode->lock);
5324 inode->logged_trans = trans->transid;
5325 if (inode_only != LOG_INODE_EXISTS ||
5326 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5327 inode->last_log_commit = inode->last_sub_trans;
5328 spin_unlock(&inode->lock);
5330 mutex_unlock(&inode->log_mutex);
5332 btrfs_free_path(path);
5333 btrfs_free_path(dst_path);
5338 * Check if we must fall back to a transaction commit when logging an inode.
5339 * This must be called after logging the inode and is used only in the context
5340 * when fsyncing an inode requires logging some other inode - in which
5341 * case we can't lock the i_mutex of each other inode we need to log as that
5342 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5343 * log inodes up or down in the hierarchy) or rename operations for example. So
5344 * we take the log_mutex of the inode after we have logged it and then check for
5345 * its last_unlink_trans value - this is safe because any task setting
5346 * last_unlink_trans must take the log_mutex and it must do this before it does
5347 * the actual unlink operation, so if we do this check before a concurrent task
5348 * sets last_unlink_trans it means we've logged a consistent version/state of
5349 * all the inode items, otherwise we are not sure and must do a transaction
5350 * commit (the concurrent task might have only updated last_unlink_trans before
5351 * we logged the inode or it might have also done the unlink).
5353 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5354 struct btrfs_inode *inode)
5356 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5359 mutex_lock(&inode->log_mutex);
5360 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5362 * Make sure any commits to the log are forced to be full
5365 btrfs_set_log_full_commit(trans);
5368 mutex_unlock(&inode->log_mutex);
5374 * follow the dentry parent pointers up the chain and see if any
5375 * of the directories in it require a full commit before they can
5376 * be logged. Returns zero if nothing special needs to be done or 1 if
5377 * a full commit is required.
5379 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5380 struct btrfs_inode *inode,
5381 struct dentry *parent,
5382 struct super_block *sb,
5386 struct dentry *old_parent = NULL;
5389 * for regular files, if its inode is already on disk, we don't
5390 * have to worry about the parents at all. This is because
5391 * we can use the last_unlink_trans field to record renames
5392 * and other fun in this file.
5394 if (S_ISREG(inode->vfs_inode.i_mode) &&
5395 inode->generation <= last_committed &&
5396 inode->last_unlink_trans <= last_committed)
5399 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5400 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5402 inode = BTRFS_I(d_inode(parent));
5406 if (btrfs_must_commit_transaction(trans, inode)) {
5411 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5414 if (IS_ROOT(parent)) {
5415 inode = BTRFS_I(d_inode(parent));
5416 if (btrfs_must_commit_transaction(trans, inode))
5421 parent = dget_parent(parent);
5423 old_parent = parent;
5424 inode = BTRFS_I(d_inode(parent));
5432 struct btrfs_dir_list {
5434 struct list_head list;
5438 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5439 * details about why it is needed.
5440 * This is a recursive operation - if an existing dentry corresponds to a
5441 * directory, that directory's new entries are logged too (same behaviour as
5442 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5443 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5444 * complains about the following circular lock dependency / possible deadlock:
5448 * lock(&type->i_mutex_dir_key#3/2);
5449 * lock(sb_internal#2);
5450 * lock(&type->i_mutex_dir_key#3/2);
5451 * lock(&sb->s_type->i_mutex_key#14);
5453 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5454 * sb_start_intwrite() in btrfs_start_transaction().
5455 * Not locking i_mutex of the inodes is still safe because:
5457 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5458 * that while logging the inode new references (names) are added or removed
5459 * from the inode, leaving the logged inode item with a link count that does
5460 * not match the number of logged inode reference items. This is fine because
5461 * at log replay time we compute the real number of links and correct the
5462 * link count in the inode item (see replay_one_buffer() and
5463 * link_to_fixup_dir());
5465 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5466 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5467 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5468 * has a size that doesn't match the sum of the lengths of all the logged
5469 * names. This does not result in a problem because if a dir_item key is
5470 * logged but its matching dir_index key is not logged, at log replay time we
5471 * don't use it to replay the respective name (see replay_one_name()). On the
5472 * other hand if only the dir_index key ends up being logged, the respective
5473 * name is added to the fs/subvol tree with both the dir_item and dir_index
5474 * keys created (see replay_one_name()).
5475 * The directory's inode item with a wrong i_size is not a problem as well,
5476 * since we don't use it at log replay time to set the i_size in the inode
5477 * item of the fs/subvol tree (see overwrite_item()).
5479 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5480 struct btrfs_root *root,
5481 struct btrfs_inode *start_inode,
5482 struct btrfs_log_ctx *ctx)
5484 struct btrfs_fs_info *fs_info = root->fs_info;
5485 struct btrfs_root *log = root->log_root;
5486 struct btrfs_path *path;
5487 LIST_HEAD(dir_list);
5488 struct btrfs_dir_list *dir_elem;
5491 path = btrfs_alloc_path();
5495 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5497 btrfs_free_path(path);
5500 dir_elem->ino = btrfs_ino(start_inode);
5501 list_add_tail(&dir_elem->list, &dir_list);
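	/*
	 * Breadth-first walk over the log tree: pop a directory from
	 * dir_list, iterate its BTRFS_DIR_ITEM_KEY items as found in the log,
	 * log every inode they point to that is not logged yet, and queue any
	 * child directories (which had log_new_dentries set while being
	 * logged) for the same treatment.
	 */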
5503 while (!list_empty(&dir_list)) {
5504 struct extent_buffer *leaf;
5505 struct btrfs_key min_key;
5509 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5512 goto next_dir_inode;
5514 min_key.objectid = dir_elem->ino;
5515 min_key.type = BTRFS_DIR_ITEM_KEY;
5518 btrfs_release_path(path);
5519 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5521 goto next_dir_inode;
5522 } else if (ret > 0) {
5524 goto next_dir_inode;
5528 leaf = path->nodes[0];
5529 nritems = btrfs_header_nritems(leaf);
5530 for (i = path->slots[0]; i < nritems; i++) {
5531 struct btrfs_dir_item *di;
5532 struct btrfs_key di_key;
5533 struct inode *di_inode;
5534 struct btrfs_dir_list *new_dir_elem;
5535 int log_mode = LOG_INODE_EXISTS;
5538 btrfs_item_key_to_cpu(leaf, &min_key, i);
5539 if (min_key.objectid != dir_elem->ino ||
5540 min_key.type != BTRFS_DIR_ITEM_KEY)
5541 goto next_dir_inode;
5543 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5544 type = btrfs_dir_type(leaf, di);
5545 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5546 type != BTRFS_FT_DIR)
5548 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5549 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5552 btrfs_release_path(path);
5553 di_inode = btrfs_iget(fs_info->sb, &di_key, root);
5554 if (IS_ERR(di_inode)) {
5555 ret = PTR_ERR(di_inode);
5556 goto next_dir_inode;
5559 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5560 btrfs_add_delayed_iput(di_inode);
5564 ctx->log_new_dentries = false;
5565 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5566 log_mode = LOG_INODE_ALL;
5567 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5568 log_mode, 0, LLONG_MAX, ctx);
5570 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5572 btrfs_add_delayed_iput(di_inode);
5574 goto next_dir_inode;
5575 if (ctx->log_new_dentries) {
5576 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5578 if (!new_dir_elem) {
5580 goto next_dir_inode;
5582 new_dir_elem->ino = di_key.objectid;
5583 list_add_tail(&new_dir_elem->list, &dir_list);
5588 ret = btrfs_next_leaf(log, path);
5590 goto next_dir_inode;
5591 } else if (ret > 0) {
5593 goto next_dir_inode;
5597 if (min_key.offset < (u64)-1) {
5602 list_del(&dir_elem->list);
5606 btrfs_free_path(path);
5610 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5611 struct btrfs_inode *inode,
5612 struct btrfs_log_ctx *ctx)
5614 struct btrfs_fs_info *fs_info = trans->fs_info;
5616 struct btrfs_path *path;
5617 struct btrfs_key key;
5618 struct btrfs_root *root = inode->root;
5619 const u64 ino = btrfs_ino(inode);
5621 path = btrfs_alloc_path();
5624 path->skip_locking = 1;
5625 path->search_commit_root = 1;
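	/*
	 * Search the commit root: we need the set of parent directories the
	 * inode had in the last committed transaction, so that directories it
	 * was unlinked from or renamed away from in the current transaction
	 * get logged too. Commit roots are never modified, so locking can be
	 * skipped.
	 */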
5628 key.type = BTRFS_INODE_REF_KEY;
5630 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5635 struct extent_buffer *leaf = path->nodes[0];
5636 int slot = path->slots[0];
5641 if (slot >= btrfs_header_nritems(leaf)) {
5642 ret = btrfs_next_leaf(root, path);
5650 btrfs_item_key_to_cpu(leaf, &key, slot);
5651 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5652 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5655 item_size = btrfs_item_size_nr(leaf, slot);
5656 ptr = btrfs_item_ptr_offset(leaf, slot);
5657 while (cur_offset < item_size) {
5658 struct btrfs_key inode_key;
5659 struct inode *dir_inode;
5661 inode_key.type = BTRFS_INODE_ITEM_KEY;
5662 inode_key.offset = 0;
5664 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5665 struct btrfs_inode_extref *extref;
5667 extref = (struct btrfs_inode_extref *)
5669 inode_key.objectid = btrfs_inode_extref_parent(
5671 cur_offset += sizeof(*extref);
5672 cur_offset += btrfs_inode_extref_name_len(leaf,
5675 inode_key.objectid = key.offset;
5676 cur_offset = item_size;
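			/*
			 * A BTRFS_INODE_REF_KEY item holds all the names the
			 * inode has in a single parent directory, and that
			 * parent's objectid is the key's offset - so one
			 * iteration consumes the whole item. Extrefs instead
			 * pack (parent, name) entries back to back, hence the
			 * cur_offset walk in the branch above.
			 */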
5679 dir_inode = btrfs_iget(fs_info->sb, &inode_key, root);
5681 * If the parent inode was deleted, return an error to
5682 * fall back to a transaction commit. This is to prevent
5683 * an inode that was moved from a parent A to a parent B,
5684 * had its former parent A deleted, and was then fsync'ed,
5685 * from existing at both parents after a log replay (with
5686 * the old parent A still existing).
5693 * mv /mnt/B/bar /mnt/A/bar
5694 * mv -T /mnt/A /mnt/B
5698 * If we ignore the old parent B which got deleted,
5699 * after a log replay we would have file bar linked
5700 * at both parents and the old parent B would still
5703 if (IS_ERR(dir_inode)) {
5704 ret = PTR_ERR(dir_inode);
5709 ctx->log_new_dentries = false;
5710 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5711 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5713 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5715 if (!ret && ctx && ctx->log_new_dentries)
5716 ret = log_new_dir_dentries(trans, root,
5717 BTRFS_I(dir_inode), ctx);
5718 btrfs_add_delayed_iput(dir_inode);
5726 btrfs_free_path(path);
5730 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5731 struct btrfs_root *root,
5732 struct btrfs_path *path,
5733 struct btrfs_log_ctx *ctx)
5735 struct btrfs_key found_key;
5737 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5740 struct btrfs_fs_info *fs_info = root->fs_info;
5741 const u64 last_committed = fs_info->last_trans_committed;
5742 struct extent_buffer *leaf = path->nodes[0];
5743 int slot = path->slots[0];
5744 struct btrfs_key search_key;
5745 struct inode *inode;
5748 btrfs_release_path(path);
5750 search_key.objectid = found_key.offset;
5751 search_key.type = BTRFS_INODE_ITEM_KEY;
5752 search_key.offset = 0;
5753 inode = btrfs_iget(fs_info->sb, &search_key, root);
5755 return PTR_ERR(inode);
5757 if (BTRFS_I(inode)->generation > last_committed)
5758 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5761 btrfs_add_delayed_iput(inode);
5765 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
5768 search_key.type = BTRFS_INODE_REF_KEY;
5769 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5773 leaf = path->nodes[0];
5774 slot = path->slots[0];
5775 if (slot >= btrfs_header_nritems(leaf)) {
5776 ret = btrfs_next_leaf(root, path);
5781 leaf = path->nodes[0];
5782 slot = path->slots[0];
5785 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5786 if (found_key.objectid != search_key.objectid ||
5787 found_key.type != BTRFS_INODE_REF_KEY)
5793 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
5794 struct btrfs_inode *inode,
5795 struct dentry *parent,
5796 struct btrfs_log_ctx *ctx)
5798 struct btrfs_root *root = inode->root;
5799 struct btrfs_fs_info *fs_info = root->fs_info;
5800 struct dentry *old_parent = NULL;
5801 struct super_block *sb = inode->vfs_inode.i_sb;
5805 if (!parent || d_really_is_negative(parent) ||
5809 inode = BTRFS_I(d_inode(parent));
5810 if (root != inode->root)
5813 if (inode->generation > fs_info->last_trans_committed) {
5814 ret = btrfs_log_inode(trans, root, inode,
5815 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5819 if (IS_ROOT(parent))
5822 parent = dget_parent(parent);
5824 old_parent = parent;
5831 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
5832 struct btrfs_inode *inode,
5833 struct dentry *parent,
5834 struct btrfs_log_ctx *ctx)
5836 struct btrfs_root *root = inode->root;
5837 const u64 ino = btrfs_ino(inode);
5838 struct btrfs_path *path;
5839 struct btrfs_key search_key;
5843 * For a single hard link case, go through a fast path that does not
5844 * need to iterate the fs/subvolume tree.
5846 if (inode->vfs_inode.i_nlink < 2)
5847 return log_new_ancestors_fast(trans, inode, parent, ctx);
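	/*
	 * With multiple hard links the dentry chain of @parent only covers
	 * the ancestors of one of the names, so walk the inode's
	 * BTRFS_INODE_REF_KEY items below to find and log the new ancestors
	 * of every link.
	 */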
5849 path = btrfs_alloc_path();
5853 search_key.objectid = ino;
5854 search_key.type = BTRFS_INODE_REF_KEY;
5855 search_key.offset = 0;
5857 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5864 struct extent_buffer *leaf = path->nodes[0];
5865 int slot = path->slots[0];
5866 struct btrfs_key found_key;
5868 if (slot >= btrfs_header_nritems(leaf)) {
5869 ret = btrfs_next_leaf(root, path);
5877 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5878 if (found_key.objectid != ino ||
5879 found_key.type > BTRFS_INODE_EXTREF_KEY)
5883 * Don't deal with extended references because they are rare
5884 * cases and too complex to handle (we would need to keep
5885 * track of which subitem we are processing for each item in
5886 * this loop, etc). So just return some error to fall back to
5887 * a transaction commit.
5889 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
5895 * Logging ancestors needs to do more searches on the fs/subvol
5896 * tree, so it releases the path as needed to avoid deadlocks.
5897 * Keep track of the last inode ref key and resume from that key
5898 * after logging all new ancestors for the current hard link.
5900 memcpy(&search_key, &found_key, sizeof(search_key));
5902 ret = log_new_ancestors(trans, root, path, ctx);
5905 btrfs_release_path(path);
5910 btrfs_free_path(path);
5915 * helper function around btrfs_log_inode to make sure newly created
5916 * parent directories also end up in the log. A minimal logging pass
5917 * (inode item and backrefs only) is done for any parent directories
5918 * that are newer than the last committed transaction.
5920 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5921 struct btrfs_inode *inode,
5922 struct dentry *parent,
5926 struct btrfs_log_ctx *ctx)
5928 struct btrfs_root *root = inode->root;
5929 struct btrfs_fs_info *fs_info = root->fs_info;
5930 struct super_block *sb;
5932 u64 last_committed = fs_info->last_trans_committed;
5933 bool log_dentries = false;
5935 sb = inode->vfs_inode.i_sb;
5937 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5943 * The previous transaction commit did not complete, so we need to do a
5944 * full commit ourselves.
5946 if (fs_info->last_trans_log_full_commit >
5947 fs_info->last_trans_committed) {
5952 if (btrfs_root_refs(&root->root_item) == 0) {
5957 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
5963 * Skip already logged inodes or inodes corresponding to tmpfiles
5964 * (since logging them is pointless, a link count of 0 means they
5965 * will never be accessible).
5967 if (btrfs_inode_in_log(inode, trans->transid) ||
5968 inode->vfs_inode.i_nlink == 0) {
5969 ret = BTRFS_NO_LOG_SYNC;
5973 ret = start_log_trans(trans, root, ctx);
5977 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5982 * for regular files, if its inode is already on disk, we don't
5983 * have to worry about the parents at all. This is because
5984 * we can use the last_unlink_trans field to record renames
5985 * and other fun in this file.
5987 if (S_ISREG(inode->vfs_inode.i_mode) &&
5988 inode->generation <= last_committed &&
5989 inode->last_unlink_trans <= last_committed) {
5994 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
5995 log_dentries = true;
5998 * On unlink we must make sure all our current and old parent directory
5999 * inodes are fully logged. This is to prevent leaving dangling
6000 * directory index entries in directories that were our parents but are
6001 * not anymore. Not doing this results in old parent directory being
6002 * impossible to delete after log replay (rmdir will always fail with
6003 * error -ENOTEMPTY).
6009 * ln testdir/foo testdir/bar
6011 * unlink testdir/bar
6012 * xfs_io -c fsync testdir/foo
6014 * mount fs, triggers log replay
6016 * If we don't log the parent directory (testdir), after log replay the
6017 * directory still has an entry pointing to the file inode using the bar
6018 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6019 * the file inode has a link count of 1.
6025 * ln foo testdir/foo2
6026 * ln foo testdir/foo3
6028 * unlink testdir/foo3
6029 * xfs_io -c fsync foo
6031 * mount fs, triggers log replay
6033 * Similar as the first example, after log replay the parent directory
6034 * testdir still has an entry pointing to the inode file with name foo3
6035 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6036 * and has a link count of 2.
6038 if (inode->last_unlink_trans > last_committed) {
6039 ret = btrfs_log_all_parents(trans, inode, ctx);
6044 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6049 ret = log_new_dir_dentries(trans, root, inode, ctx);
6054 btrfs_set_log_full_commit(trans);
6059 btrfs_remove_log_ctx(root, ctx);
6060 btrfs_end_log_trans(root);
6066 * it is not safe to log the dentry if the chunk root has added new
6067 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6068 * If this returns 1, you must commit the transaction to safely get your data on disk.
6071 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6072 struct dentry *dentry,
6075 struct btrfs_log_ctx *ctx)
6077 struct dentry *parent = dget_parent(dentry);
6080 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6081 start, end, LOG_INODE_ALL, ctx);
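/*
 * A simplified sketch of the expected calling pattern (hypothetical caller,
 * for illustration only - in practice this is driven from the fsync path):
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, start, end, ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, ctx);
 *	else if (ret != BTRFS_NO_LOG_SYNC)
 *		ret = btrfs_commit_transaction(trans);
 */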
6088 * should be called during mount to recover and replay any log trees
6091 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6094 struct btrfs_path *path;
6095 struct btrfs_trans_handle *trans;
6096 struct btrfs_key key;
6097 struct btrfs_key found_key;
6098 struct btrfs_key tmp_key;
6099 struct btrfs_root *log;
6100 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6101 struct walk_control wc = {
6102 .process_func = process_one_buffer,
6103 .stage = LOG_WALK_PIN_ONLY,
6106 path = btrfs_alloc_path();
6110 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6112 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6113 if (IS_ERR(trans)) {
6114 ret = PTR_ERR(trans);
6121 ret = walk_log_tree(trans, log_root_tree, &wc);
6123 btrfs_handle_fs_error(fs_info, ret,
6124 "Failed to pin buffers while recovering log root tree.");
6129 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6130 key.offset = (u64)-1;
6131 key.type = BTRFS_ROOT_ITEM_KEY;
6134 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6137 btrfs_handle_fs_error(fs_info, ret,
6138 "Couldn't find tree log root.");
6142 if (path->slots[0] == 0)
6146 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6148 btrfs_release_path(path);
6149 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6152 log = btrfs_read_tree_root(log_root_tree, &found_key);
6155 btrfs_handle_fs_error(fs_info, ret,
6156 "Couldn't read tree log root.");
6160 tmp_key.objectid = found_key.offset;
6161 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6162 tmp_key.offset = (u64)-1;
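		/*
		 * Tree log roots live in the log root tree keyed as
		 * (BTRFS_TREE_LOG_OBJECTID, BTRFS_ROOT_ITEM_KEY, subvol id),
		 * so the offset of the key we found names the fs/subvolume
		 * tree this log belongs to - look that tree up as the replay
		 * destination.
		 */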
6164 wc.replay_dest = btrfs_get_fs_root(fs_info, &tmp_key, true);
6165 if (IS_ERR(wc.replay_dest)) {
6166 ret = PTR_ERR(wc.replay_dest);
6169 * We didn't find the subvol, likely because it was
6170 * deleted. This is ok, simply skip this log and go to the next one.
6173 * We need to exclude the root because we can't have
6174 * other log replays overwriting this log as we'll read
6175 * it back in a few more times. This will keep our
6176 * block from being modified, and we'll just bail for
6177 * each subsequent pass.
6180 ret = btrfs_pin_extent_for_log_replay(trans,
6183 btrfs_put_root(log);
6187 btrfs_handle_fs_error(fs_info, ret,
6188 "Couldn't read target root for tree log recovery.");
6192 wc.replay_dest->log_root = log;
6193 btrfs_record_root_in_trans(trans, wc.replay_dest);
6194 ret = walk_log_tree(trans, log, &wc);
6196 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6197 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6201 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6202 struct btrfs_root *root = wc.replay_dest;
6204 btrfs_release_path(path);
6207 * We have just replayed everything, and the highest
6208 * objectid of fs roots probably has changed in case
6209 * some inode_items got replayed.
6211 * root->objectid_mutex is not acquired as log replay
6212 * could only happen during mount.
6214 ret = btrfs_find_highest_objectid(root,
6215 &root->highest_objectid);
6218 wc.replay_dest->log_root = NULL;
6219 btrfs_put_root(wc.replay_dest);
6220 btrfs_put_root(log);
6225 if (found_key.offset == 0)
6227 key.offset = found_key.offset - 1;
6229 btrfs_release_path(path);
6231 /* step one is to pin it all, step two is to replay just inodes */
6234 wc.process_func = replay_one_buffer;
6235 wc.stage = LOG_WALK_REPLAY_INODES;
6238 /* step three is to replay everything */
6239 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6244 btrfs_free_path(path);
6246 /* step 4: commit the transaction, which also unpins the blocks */
6247 ret = btrfs_commit_transaction(trans);
6251 log_root_tree->log_root = NULL;
6252 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6253 btrfs_put_root(log_root_tree);
6258 btrfs_end_transaction(wc.trans);
6259 btrfs_free_path(path);
6264 * there are some corner cases where we want to force a full
6265 * commit instead of allowing a directory to be logged.
6267 * They revolve around files that were unlinked from the directory, and
6268 * this function updates the parent directory so that a full commit is
6269 * properly done if it is fsync'd later after the unlinks are done.
6271 * Must be called before the unlink operations (updates to the subvolume tree,
6272 * inodes, etc) are done.
6274 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6275 struct btrfs_inode *dir, struct btrfs_inode *inode,
6279 * when we're logging a file, if it hasn't been renamed
6280 * or unlinked, and its inode is fully committed on disk,
6281 * we don't have to worry about walking up the directory chain
6282 * to log its parents.
6284 * So, we use the last_unlink_trans field to put this transid
6285 * into the file. When the file is logged we check it and
6286 * don't log the parents if the file is fully on disk.
6288 mutex_lock(&inode->log_mutex);
6289 inode->last_unlink_trans = trans->transid;
6290 mutex_unlock(&inode->log_mutex);
6293 * if this directory was already logged any new
6294 * names for this file/dir will get recorded
6296 if (dir->logged_trans == trans->transid)
6300 * if the inode we're about to unlink was logged,
6301 * the log will be properly updated for any new names
6303 if (inode->logged_trans == trans->transid)
6307 * when renaming files across directories, if the directory
6308 * we're unlinking from gets fsync'd later on, there's
6309 * no way to find the destination directory later and fsync it
6310 * properly. So, we have to be conservative and force commits
6311 * so the new name gets discovered.
6316 /* we can safely do the unlink without any special recording */
6320 mutex_lock(&dir->log_mutex);
6321 dir->last_unlink_trans = trans->transid;
6322 mutex_unlock(&dir->log_mutex);
6326 * Make sure that if someone attempts to fsync the parent directory of a deleted
6327 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6328 * that after replaying the log tree of the parent directory's root we will not
6329 * see the snapshot anymore and at log replay time we will not see any log tree
6330 * corresponding to the deleted snapshot's root, which could lead to replaying
6331 * it after replaying the log tree of the parent directory (which would replay
6332 * the snapshot delete operation).
6334 * Must be called before the actual snapshot destroy operation (updates to the
6335 * parent root and tree of tree roots trees, etc) are done.
6337 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6338 struct btrfs_inode *dir)
6340 mutex_lock(&dir->log_mutex);
6341 dir->last_unlink_trans = trans->transid;
6342 mutex_unlock(&dir->log_mutex);
6346 * Call this after adding a new name for a file and it will properly
6347 * update the log to reflect the new name.
6349 * @ctx cannot be NULL when @sync_log is false, and should be NULL when it's
6350 * true (because it's not used).
6352 * Return value depends on whether @sync_log is true or false.
6353 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6354 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
6356 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
6357 * sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6358 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6359 * committed (without attempting to sync the log).
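 *
 * A simplified sketch of a caller when @sync_log is false (hypothetical,
 * for illustration only):
 *
 *	ret = btrfs_log_new_name(trans, inode, old_dir, parent, false, &ctx);
 *	if (ret == BTRFS_NEED_TRANS_COMMIT)
 *		commit the transaction;
 *	else if (ret == BTRFS_NEED_LOG_SYNC)
 *		btrfs_sync_log(trans, inode->root, &ctx);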
6361 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6362 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6363 struct dentry *parent,
6364 bool sync_log, struct btrfs_log_ctx *ctx)
6366 struct btrfs_fs_info *fs_info = trans->fs_info;
6370 * this will force the logging code to walk the dentry chain up to the parent
6373 if (!S_ISDIR(inode->vfs_inode.i_mode))
6374 inode->last_unlink_trans = trans->transid;
6377 * if this inode hasn't been logged and the directory we're renaming it
6378 * from hasn't been logged, we don't need to log it
6380 if (inode->logged_trans <= fs_info->last_trans_committed &&
6381 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6382 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6383 BTRFS_DONT_NEED_LOG_SYNC;
6386 struct btrfs_log_ctx ctx2;
6388 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6389 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6390 LOG_INODE_EXISTS, &ctx2);
6391 if (ret == BTRFS_NO_LOG_SYNC)
6392 return BTRFS_DONT_NEED_TRANS_COMMIT;
6394 return BTRFS_NEED_TRANS_COMMIT;
6396 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6398 return BTRFS_NEED_TRANS_COMMIT;
6399 return BTRFS_DONT_NEED_TRANS_COMMIT;
6403 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6404 LOG_INODE_EXISTS, ctx);
6405 if (ret == BTRFS_NO_LOG_SYNC)
6406 return BTRFS_DONT_NEED_LOG_SYNC;
6408 return BTRFS_NEED_TRANS_COMMIT;
6410 return BTRFS_NEED_LOG_SYNC;