/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
#include "inode-map.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
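/*
 * For example, an fsync of a regular file logs that inode with
 * LOG_INODE_ALL, while a parent directory may only be logged with
 * LOG_INODE_EXISTS so that a replayed link has a parent to land in.
 */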
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics.
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
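/*
 * Roughly, log replay walks the tree once per stage in the order
 * above: first pinning every block the log references, then creating
 * the logged inodes in the subvolume, then replaying dir index keys,
 * and finally replaying everything else.
 */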
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}
		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&root->fs_info->tree_log_mutex);
		if (!root->fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, root->fs_info);
		mutex_unlock(&root->fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}
out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
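/*
 * Note: a successful start_log_trans() is balanced by a call to
 * btrfs_end_log_trans() below, which drops log_writers and wakes
 * anyone waiting to sync the log.
 */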
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
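/*
 * A minimal sketch of how replay drives this struct (the real setup
 * lives in btrfs_recover_log_trees()):
 *
 *	struct walk_control wc = {
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *	ret = walk_log_tree(trans, log_root, &wc);
 *
 * Freeing a log tree at commit time instead sets wc.free.
 */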
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
						      eb->start, eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
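/*
 * The flags wc carries decide what happens to each block above: pin
 * keeps the log's extents allocated during replay, while write and
 * wait are used when flushing the log tree to disk for a sync.
 */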
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(root, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				if (!ret)
					ret = btrfs_del_csums(trans,
						      root->fs_info->csum_root,
						      sums->bytenr,
						      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, root);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, root);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
								 victim_parent,
								 inode,
								 victim_name,
								 victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans, root);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
			if (ret)
				return ret;
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
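/*
 * To summarize the conflict resolution above: an existing name is only
 * unlinked when the log does not also contain it, and the checks run
 * in order over old style refs, extended refs, and then the two
 * directory item flavours (by index, then by name).
 */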
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
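/*
 * On-disk layout the two helpers above parse: an INODE_REF item packs
 * one or more (struct btrfs_inode_ref, name bytes) pairs back to back,
 * while an INODE_EXTREF item stores (struct btrfs_inode_extref, name
 * bytes) pairs keyed by a hash of the parent objectid and the name.
 */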
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(dir, dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log,
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
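/*
 * For example, if the log holds a dir_log item for dirid 257 whose
 * key offset is 0 and whose dir_log_end is 128, the log owns index
 * keys 0-128 of that directory: any subvolume entry in that range
 * with no matching log entry was deleted before the fsync.
 */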
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode
 * it points to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans, root);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
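/*
 * The xattr counterpart of replay_dir_deletes() below: walk every
 * xattr the subvolume inode currently has and delete the ones that
 * have no matching entry in the log, since a logged inode has all of
 * its current xattrs copied into the log.
 */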
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
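/*
 * Roughly which keys each stage handles: LOG_WALK_REPLAY_INODES takes
 * the BTRFS_INODE_ITEM_KEY items, LOG_WALK_REPLAY_DIR_INDEX the
 * BTRFS_DIR_INDEX_KEY items, and LOG_WALK_REPLAY_ALL everything else
 * (xattrs, inode refs, file extents and dir items).
 */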
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/* for regular files, make sure corresponding
			 * orphan item exists. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = root->nodesize;

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(trans, root->fs_info,
							 next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
				}

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
							 bytenr, blocksize);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(trans, root->fs_info,
							 next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				if (ret)
					return ret;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
2554 * drop the reference count on the tree rooted at 'snap'. This traverses
2555 * the tree freeing any blocks that have a ref count of zero after being
2558 static int walk_log_tree(struct btrfs_trans_handle *trans,
2559 struct btrfs_root *log, struct walk_control *wc)
2564 struct btrfs_path *path;
2567 path = btrfs_alloc_path();
2571 level = btrfs_header_level(log->node);
2573 path->nodes[level] = log->node;
2574 extent_buffer_get(log->node);
2575 path->slots[level] = 0;
2578 wret = walk_down_log_tree(trans, log, path, &level, wc);
2586 wret = walk_up_log_tree(trans, log, path, &level, wc);
2595 /* was the root node processed? if not, catch it here */
2596 if (path->nodes[orig_level]) {
2597 ret = wc->process_func(log, path->nodes[orig_level], wc,
2598 btrfs_header_generation(path->nodes[orig_level]));
2602 struct extent_buffer *next;
2604 next = path->nodes[orig_level];
2607 btrfs_tree_lock(next);
2608 btrfs_set_lock_blocking(next);
2609 clean_tree_block(trans, log->fs_info, next);
2610 btrfs_wait_tree_block_writeback(next);
2611 btrfs_tree_unlock(next);
2613 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2614 clear_extent_buffer_dirty(next);
2617 WARN_ON(log->root_key.objectid !=
2618 BTRFS_TREE_LOG_OBJECTID);
2619 ret = btrfs_free_and_pin_reserved_extent(log, next->start, next->len);
2627 btrfs_free_path(path);
2632 * helper function to update the item for a given subvolume's log root
2633 * in the tree of log roots
2635 static int update_log_root(struct btrfs_trans_handle *trans,
2636 struct btrfs_root *log)
2640 if (log->log_transid == 1) {
2641 /* insert root item on the first sync */
2642 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2643 &log->root_key, &log->root_item);
2645 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2646 &log->root_key, &log->root_item);
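/*
 * wait for a log commit of the given transid to finish. At most two log
 * transactions are in flight at any time, so the commit state for
 * transid N lives in slot N % 2. Called with root->log_mutex held; the
 * mutex is dropped while sleeping and retaken before returning.
 */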
2651 static void wait_log_commit(struct btrfs_root *root, int transid)
2654 int index = transid % 2;
2657 * we only allow two pending log transactions at a time,
2658 * so we know that if ours is more than 2 older than the
2659 * current transaction, we're done
2662 prepare_to_wait(&root->log_commit_wait[index],
2663 &wait, TASK_UNINTERRUPTIBLE);
2664 mutex_unlock(&root->log_mutex);
2666 if (root->log_transid_committed < transid &&
2667 atomic_read(&root->log_commit[index]))
2670 finish_wait(&root->log_commit_wait[index], &wait);
2671 mutex_lock(&root->log_mutex);
2672 } while (root->log_transid_committed < transid &&
2673 atomic_read(&root->log_commit[index]));
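/*
 * wait until every task that joined the current log transaction is done
 * writing to it, i.e. until root->log_writers drops to zero.
 */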
2676 static void wait_for_writer(struct btrfs_root *root)
2680 while (atomic_read(&root->log_writers)) {
2681 prepare_to_wait(&root->log_writer_wait,
2682 &wait, TASK_UNINTERRUPTIBLE);
2683 mutex_unlock(&root->log_mutex);
2684 if (atomic_read(&root->log_writers))
2686 finish_wait(&root->log_writer_wait, &wait);
2687 mutex_lock(&root->log_mutex);
2691 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2692 struct btrfs_log_ctx *ctx)
2697 mutex_lock(&root->log_mutex);
2698 list_del_init(&ctx->list);
2699 mutex_unlock(&root->log_mutex);
2703 * Invoked in log mutex context, or from a context where it is certain
2704 * that no other task can access the list.
2706 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2707 int index, int error)
2709 struct btrfs_log_ctx *ctx;
2710 struct btrfs_log_ctx *safe;
2712 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2713 list_del_init(&ctx->list);
2714 ctx->log_ret = error;
2717 INIT_LIST_HEAD(&root->log_ctxs[index]);
2721 * btrfs_sync_log sends a given tree log down to the disk and
2722 * updates the super blocks to record it. When this call is done,
2723 * you know that any inodes previously logged are safely on disk only
2724 * if it returns 0.
2726 * Any other return value means you need to call btrfs_commit_transaction.
2727 * Some of the edge cases for fsyncing directories that have had unlinks
2728 * or renames done in the past mean that sometimes the only safe
2729 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2730 * that has happened.
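 *
 * A sketch of the caller-side contract (the fsync path does roughly
 * this, see btrfs_sync_file(); not copied verbatim):
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans, root);
 *	else
 *		ret = btrfs_commit_transaction(trans, root);
 */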
2732 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2733 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2739 struct btrfs_root *log = root->log_root;
2740 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2741 int log_transid = 0;
2742 struct btrfs_log_ctx root_log_ctx;
2743 struct blk_plug plug;
2745 mutex_lock(&root->log_mutex);
2746 log_transid = ctx->log_transid;
2747 if (root->log_transid_committed >= log_transid) {
2748 mutex_unlock(&root->log_mutex);
2749 return ctx->log_ret;
2752 index1 = log_transid % 2;
2753 if (atomic_read(&root->log_commit[index1])) {
2754 wait_log_commit(root, log_transid);
2755 mutex_unlock(&root->log_mutex);
2756 return ctx->log_ret;
2758 ASSERT(log_transid == root->log_transid);
2759 atomic_set(&root->log_commit[index1], 1);
2761 /* wait for previous tree log sync to complete */
2762 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2763 wait_log_commit(root, log_transid - 1);
2766 int batch = atomic_read(&root->log_batch);
2767 /* when we're on an ssd, just kick the log commit out */
2768 if (!btrfs_test_opt(root, SSD) &&
2769 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2770 mutex_unlock(&root->log_mutex);
2771 schedule_timeout_uninterruptible(1);
2772 mutex_lock(&root->log_mutex);
2774 wait_for_writer(root);
2775 if (batch == atomic_read(&root->log_batch))
2779 /* bail out if we need to do a full commit */
2780 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2782 btrfs_free_logged_extents(log, log_transid);
2783 mutex_unlock(&root->log_mutex);
2787 if (log_transid % 2 == 0)
2788 mark = EXTENT_DIRTY;
2789 else
2790 mark = EXTENT_NEW;
2792 /* we start IO on all the marked extents here, but we don't actually
2793 * wait for them until later.
2795 blk_start_plug(&plug);
2796 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2798 blk_finish_plug(&plug);
2799 btrfs_abort_transaction(trans, root, ret);
2800 btrfs_free_logged_extents(log, log_transid);
2801 btrfs_set_log_full_commit(root->fs_info, trans);
2802 mutex_unlock(&root->log_mutex);
2806 btrfs_set_root_node(&log->root_item, log->node);
2808 root->log_transid++;
2809 log->log_transid = root->log_transid;
2810 root->log_start_pid = 0;
2812 * Update or create log root item under the root's log_mutex to prevent
2813 * races with concurrent log syncs that can lead to failure to update
2814 * log root item because it was not created yet.
2816 ret = update_log_root(trans, log);
2818 * IO has been started, blocks of the log tree have WRITTEN flag set
2819 * in their headers. new modifications of the log will be written to
2820 * new positions. so it's safe to allow log writers to go in.
2822 mutex_unlock(&root->log_mutex);
2824 btrfs_init_log_ctx(&root_log_ctx);
2826 mutex_lock(&log_root_tree->log_mutex);
2827 atomic_inc(&log_root_tree->log_batch);
2828 atomic_inc(&log_root_tree->log_writers);
2830 index2 = log_root_tree->log_transid % 2;
2831 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2832 root_log_ctx.log_transid = log_root_tree->log_transid;
2834 mutex_unlock(&log_root_tree->log_mutex);
2836 mutex_lock(&log_root_tree->log_mutex);
2837 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2839 * Implicit memory barrier after atomic_dec_and_test
2841 if (waitqueue_active(&log_root_tree->log_writer_wait))
2842 wake_up(&log_root_tree->log_writer_wait);
2846 if (!list_empty(&root_log_ctx.list))
2847 list_del_init(&root_log_ctx.list);
2849 blk_finish_plug(&plug);
2850 btrfs_set_log_full_commit(root->fs_info, trans);
2852 if (ret != -ENOSPC) {
2853 btrfs_abort_transaction(trans, root, ret);
2854 mutex_unlock(&log_root_tree->log_mutex);
2857 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2858 btrfs_free_logged_extents(log, log_transid);
2859 mutex_unlock(&log_root_tree->log_mutex);
2864 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2865 blk_finish_plug(&plug);
2866 list_del_init(&root_log_ctx.list);
2867 mutex_unlock(&log_root_tree->log_mutex);
2868 ret = root_log_ctx.log_ret;
2872 index2 = root_log_ctx.log_transid % 2;
2873 if (atomic_read(&log_root_tree->log_commit[index2])) {
2874 blk_finish_plug(&plug);
2875 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2877 btrfs_wait_logged_extents(trans, log, log_transid);
2878 wait_log_commit(log_root_tree,
2879 root_log_ctx.log_transid);
2880 mutex_unlock(&log_root_tree->log_mutex);
2882 ret = root_log_ctx.log_ret;
2885 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2886 atomic_set(&log_root_tree->log_commit[index2], 1);
2888 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2889 wait_log_commit(log_root_tree,
2890 root_log_ctx.log_transid - 1);
2893 wait_for_writer(log_root_tree);
2896 * now that we've moved on to the tree of log tree roots,
2897 * check the full commit flag again
2899 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2900 blk_finish_plug(&plug);
2901 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2902 btrfs_free_logged_extents(log, log_transid);
2903 mutex_unlock(&log_root_tree->log_mutex);
2905 goto out_wake_log_root;
2908 ret = btrfs_write_marked_extents(log_root_tree,
2909 &log_root_tree->dirty_log_pages,
2910 EXTENT_DIRTY | EXTENT_NEW);
2911 blk_finish_plug(&plug);
2913 btrfs_set_log_full_commit(root->fs_info, trans);
2914 btrfs_abort_transaction(trans, root, ret);
2915 btrfs_free_logged_extents(log, log_transid);
2916 mutex_unlock(&log_root_tree->log_mutex);
2917 goto out_wake_log_root;
2919 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2921 ret = btrfs_wait_marked_extents(log_root_tree,
2922 &log_root_tree->dirty_log_pages,
2923 EXTENT_NEW | EXTENT_DIRTY);
2925 btrfs_set_log_full_commit(root->fs_info, trans);
2926 btrfs_free_logged_extents(log, log_transid);
2927 mutex_unlock(&log_root_tree->log_mutex);
2928 goto out_wake_log_root;
2930 btrfs_wait_logged_extents(trans, log, log_transid);
2932 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2933 log_root_tree->node->start);
2934 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2935 btrfs_header_level(log_root_tree->node));
2937 log_root_tree->log_transid++;
2938 mutex_unlock(&log_root_tree->log_mutex);
2941 * nobody else is going to jump in and write the ctree
2942 * super here because the log_commit atomic below is protecting
2943 * us. We must be called with a transaction handle pinning
2944 * the running transaction open, so a full commit can't hop
2945 * in and cause problems either.
2947 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2949 btrfs_set_log_full_commit(root->fs_info, trans);
2950 btrfs_abort_transaction(trans, root, ret);
2951 goto out_wake_log_root;
2954 mutex_lock(&root->log_mutex);
2955 if (root->last_log_commit < log_transid)
2956 root->last_log_commit = log_transid;
2957 mutex_unlock(&root->log_mutex);
2960 mutex_lock(&log_root_tree->log_mutex);
2961 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2963 log_root_tree->log_transid_committed++;
2964 atomic_set(&log_root_tree->log_commit[index2], 0);
2965 mutex_unlock(&log_root_tree->log_mutex);
2968 * The barrier before waitqueue_active is needed so all the updates
2969 * above are seen by the woken threads. It might not be necessary, but
2970 * proving that seems to be hard.
2973 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2974 wake_up(&log_root_tree->log_commit_wait[index2]);
2976 mutex_lock(&root->log_mutex);
2977 btrfs_remove_all_log_ctxs(root, index1, ret);
2978 root->log_transid_committed++;
2979 atomic_set(&root->log_commit[index1], 0);
2980 mutex_unlock(&root->log_mutex);
2983 * The barrier before waitqueue_active is needed so all the updates
2984 * above are seen by the woken threads. It might not be necessary, but
2985 * proving that seems to be hard.
2988 if (waitqueue_active(&root->log_commit_wait[index1]))
2989 wake_up(&root->log_commit_wait[index1]);
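/*
 * free all the blocks of a log tree: walk the whole tree with a freeing
 * walk_control, then clear any dirty/new bits still set in the tree's
 * dirty_log_pages so none of it is ever written back.
 */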
2993 static void free_log_tree(struct btrfs_trans_handle *trans,
2994 struct btrfs_root *log)
2999 struct walk_control wc = {
3000 .free = 1,
3001 .process_func = process_one_buffer
3002 };
3004 ret = walk_log_tree(trans, log, &wc);
3005 /* I don't think this can happen but just in case */
3007 btrfs_abort_transaction(trans, log, ret);
3010 ret = find_first_extent_bit(&log->dirty_log_pages,
3011 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, NULL);
3016 clear_extent_bits(&log->dirty_log_pages, start, end,
3017 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
3021 * We may have short-circuited the log tree with the full commit logic
3022 * and left ordered extents on our list, so clear these out to keep us
3023 * from leaking inodes and memory.
3025 btrfs_free_logged_extents(log, 0);
3026 btrfs_free_logged_extents(log, 1);
3028 free_extent_buffer(log->node);
3033 * free all the extents used by the tree log. This should be called
3034 * at commit time of the full transaction
3036 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3038 if (root->log_root) {
3039 free_log_tree(trans, root->log_root);
3040 root->log_root = NULL;
3045 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3046 struct btrfs_fs_info *fs_info)
3048 if (fs_info->log_root_tree) {
3049 free_log_tree(trans, fs_info->log_root_tree);
3050 fs_info->log_root_tree = NULL;
3056 * If both a file and directory are logged, and unlinks or renames are
3057 * mixed in, we have a few interesting corners:
3059 * create file X in dir Y
3060 * link file X to X.link in dir Y
3062 * unlink file X but leave X.link
3065 * After a crash we would expect only X.link to exist. But file X
3066 * didn't get fsync'd again so the log has back refs for X and X.link.
3068 * We solve this by removing directory entries and inode backrefs from the
3069 * log when a file that was logged in the current transaction is
3070 * unlinked. Any later fsync will include the updated log entries, and
3071 * we'll be able to reconstruct the proper directory items from backrefs.
3073 * This optimization allows us to avoid relogging the entire inode
3074 * or the entire directory.
3076 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3077 struct btrfs_root *root,
3078 const char *name, int name_len,
3079 struct inode *dir, u64 index)
3081 struct btrfs_root *log;
3082 struct btrfs_dir_item *di;
3083 struct btrfs_path *path;
3087 u64 dir_ino = btrfs_ino(dir);
3089 if (BTRFS_I(dir)->logged_trans < trans->transid)
3092 ret = join_running_log_trans(root);
3096 mutex_lock(&BTRFS_I(dir)->log_mutex);
3098 log = root->log_root;
3099 path = btrfs_alloc_path();
3105 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3106 name, name_len, -1);
3112 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3113 bytes_del += name_len;
3119 btrfs_release_path(path);
3120 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3121 index, name, name_len, -1);
3127 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3128 bytes_del += name_len;
3135 /* update the directory size in the log to reflect the names
3136 * we have removed from the directory so far.
3137 */
3139 struct btrfs_key key;
3141 key.objectid = dir_ino;
3143 key.type = BTRFS_INODE_ITEM_KEY;
3144 btrfs_release_path(path);
3146 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3152 struct btrfs_inode_item *item;
3155 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3156 struct btrfs_inode_item);
3157 i_size = btrfs_inode_size(path->nodes[0], item);
3158 if (i_size > bytes_del)
3159 i_size -= bytes_del;
3160 else
3161 i_size = 0;
3162 btrfs_set_inode_size(path->nodes[0], item, i_size);
3163 btrfs_mark_buffer_dirty(path->nodes[0]);
3166 btrfs_release_path(path);
3169 btrfs_free_path(path);
3171 mutex_unlock(&BTRFS_I(dir)->log_mutex);
3172 if (ret == -ENOSPC) {
3173 btrfs_set_log_full_commit(root->fs_info, trans);
3176 btrfs_abort_transaction(trans, root, ret);
3178 btrfs_end_log_trans(root);
3183 /* see comments for btrfs_del_dir_entries_in_log */
3184 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3185 struct btrfs_root *root,
3186 const char *name, int name_len,
3187 struct inode *inode, u64 dirid)
3189 struct btrfs_root *log;
3193 if (BTRFS_I(inode)->logged_trans < trans->transid)
3196 ret = join_running_log_trans(root);
3199 log = root->log_root;
3200 mutex_lock(&BTRFS_I(inode)->log_mutex);
3202 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), dirid, &index);
3204 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3205 if (ret == -ENOSPC) {
3206 btrfs_set_log_full_commit(root->fs_info, trans);
3208 } else if (ret < 0 && ret != -ENOENT)
3209 btrfs_abort_transaction(trans, root, ret);
3210 btrfs_end_log_trans(root);
3216 * creates a range item in the log for 'dirid'. first_offset and
3217 * last_offset tell us which parts of the key space the log should
3218 * be considered authoritative for.
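 *
 * For example (offsets made up): logging DIR_ITEM keys for directory
 * 257 over the range [2, 100] inserts a (257 DIR_LOG_ITEM 2) key whose
 * btrfs_dir_log_item end field is set to 100.
 */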
3220 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3221 struct btrfs_root *log,
3222 struct btrfs_path *path,
3223 int key_type, u64 dirid,
3224 u64 first_offset, u64 last_offset)
3227 struct btrfs_key key;
3228 struct btrfs_dir_log_item *item;
3230 key.objectid = dirid;
3231 key.offset = first_offset;
3232 if (key_type == BTRFS_DIR_ITEM_KEY)
3233 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3235 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3236 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3240 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3241 struct btrfs_dir_log_item);
3242 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3243 btrfs_mark_buffer_dirty(path->nodes[0]);
3244 btrfs_release_path(path);
3249 * log all the items included in the current transaction for a given
3250 * directory. This also creates the range items in the log tree required
3251 * to replay anything deleted before the fsync
3253 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3254 struct btrfs_root *root, struct inode *inode,
3255 struct btrfs_path *path,
3256 struct btrfs_path *dst_path, int key_type,
3257 struct btrfs_log_ctx *ctx,
3258 u64 min_offset, u64 *last_offset_ret)
3260 struct btrfs_key min_key;
3261 struct btrfs_root *log = root->log_root;
3262 struct extent_buffer *src;
3267 u64 first_offset = min_offset;
3268 u64 last_offset = (u64)-1;
3269 u64 ino = btrfs_ino(inode);
3271 log = root->log_root;
3273 min_key.objectid = ino;
3274 min_key.type = key_type;
3275 min_key.offset = min_offset;
3277 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3280 * we didn't find anything from this transaction, see if there
3281 * is anything at all
3283 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3284 min_key.objectid = ino;
3285 min_key.type = key_type;
3286 min_key.offset = (u64)-1;
3287 btrfs_release_path(path);
3288 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3290 btrfs_release_path(path);
3293 ret = btrfs_previous_item(root, path, ino, key_type);
3295 /* if ret == 0 there are items for this type,
3296 * create a range to tell us the last key of this type.
3297 * otherwise, there are no items in this directory after
3298 * *min_offset, and we create a range to indicate that.
3301 struct btrfs_key tmp;
3302 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3304 if (key_type == tmp.type)
3305 first_offset = max(min_offset, tmp.offset) + 1;
3310 /* go backward to find any previous key */
3311 ret = btrfs_previous_item(root, path, ino, key_type);
3313 struct btrfs_key tmp;
3314 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3315 if (key_type == tmp.type) {
3316 first_offset = tmp.offset;
3317 ret = overwrite_item(trans, log, dst_path,
3318 path->nodes[0], path->slots[0], &tmp);
3326 btrfs_release_path(path);
3329 * Find the first key from this transaction again. See the note for
3330 * log_new_dir_dentries, if we're logging a directory recursively we
3331 * won't be holding its i_mutex, which means we can modify the directory
3332 * while we're logging it. If we remove an entry between our first
3333 * search and this search we'll not find the key again and can just
3334 * bail out.
3335 */
3336 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3341 * we have a block from this transaction, log every item in it
3342 * from our directory
3345 struct btrfs_key tmp;
3346 src = path->nodes[0];
3347 nritems = btrfs_header_nritems(src);
3348 for (i = path->slots[0]; i < nritems; i++) {
3349 struct btrfs_dir_item *di;
3351 btrfs_item_key_to_cpu(src, &min_key, i);
3353 if (min_key.objectid != ino || min_key.type != key_type)
3355 ret = overwrite_item(trans, log, dst_path, src, i, &min_key);
3363 * We must make sure that when we log a directory entry,
3364 * the corresponding inode, after log replay, has a
3365 * matching link count. For example:
3371 * xfs_io -c "fsync" mydir
3373 * <mount fs and log replay>
3375 * Would result in a fsync log that, when replayed, leaves our
3376 * file inode with a link count of 1 but two directory entries
3377 * pointing to the same inode. After removing one of the names,
3378 * it would not be possible to remove the other name, which
3379 * always resulted in stale file handle errors, and it would not
3380 * be possible to rmdir the parent directory, since its i_size
3381 * could never decrement to the value BTRFS_EMPTY_DIR_SIZE,
3382 * resulting in -ENOTEMPTY errors.
3383 */
3385 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3386 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3388 (btrfs_dir_transid(src, di) == trans->transid ||
3389 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3390 tmp.type != BTRFS_ROOT_ITEM_KEY)
3391 ctx->log_new_dentries = true;
3393 path->slots[0] = nritems;
3396 * look ahead to the next item and see if it is also
3397 * from this directory and from this transaction
3399 ret = btrfs_next_leaf(root, path);
3402 last_offset = (u64)-1;
3407 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3408 if (tmp.objectid != ino || tmp.type != key_type) {
3409 last_offset = (u64)-1;
3412 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3413 ret = overwrite_item(trans, log, dst_path,
3414 path->nodes[0], path->slots[0], &tmp);
3419 last_offset = tmp.offset;
3424 btrfs_release_path(path);
3425 btrfs_release_path(dst_path);
3428 *last_offset_ret = last_offset;
3430 * insert the log range keys to indicate where the log
3433 ret = insert_dir_log_key(trans, log, path, key_type,
3434 ino, first_offset, last_offset);
3442 * Logging directories is very similar to logging inodes; we find all the items
3443 * from the current transaction and write them to the log.
3445 * The recovery code scans the directory in the subvolume, and if it finds a
3446 * key in the range logged that is not present in the log tree, then it means
3447 * that dir entry was unlinked during the transaction.
3449 * In order for that scan to work, we must include one key smaller than
3450 * the smallest logged by this transaction and one key larger than the largest
3451 * key logged by this transaction.
3453 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3454 struct btrfs_root *root, struct inode *inode,
3455 struct btrfs_path *path,
3456 struct btrfs_path *dst_path,
3457 struct btrfs_log_ctx *ctx)
3462 int key_type = BTRFS_DIR_ITEM_KEY;
3468 ret = log_dir_items(trans, root, inode, path,
3469 dst_path, key_type, ctx, min_key, &max_key);
3473 if (max_key == (u64)-1)
3475 min_key = max_key + 1;
3478 if (key_type == BTRFS_DIR_ITEM_KEY) {
3479 key_type = BTRFS_DIR_INDEX_KEY;
3486 * a helper function to drop items from the log before we relog an
3487 * inode. max_key_type indicates the highest item type to remove.
3488 * This cannot be run for file data extents because it does not
3489 * free the extents they point to.
3491 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3492 struct btrfs_root *log,
3493 struct btrfs_path *path,
3494 u64 objectid, int max_key_type)
3497 struct btrfs_key key;
3498 struct btrfs_key found_key;
3501 key.objectid = objectid;
3502 key.type = max_key_type;
3503 key.offset = (u64)-1;
3506 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3507 BUG_ON(ret == 0); /* Logic error */
3511 if (path->slots[0] == 0)
3515 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3518 if (found_key.objectid != objectid)
3521 found_key.offset = 0;
3523 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, &start_slot);
3526 ret = btrfs_del_items(trans, log, path, start_slot,
3527 path->slots[0] - start_slot + 1);
3529 * If start slot isn't 0 then we don't need to re-search, we've
3530 * found the last guy with the objectid in this tree.
3532 if (ret || start_slot != 0)
3534 btrfs_release_path(path);
3536 btrfs_release_path(path);
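/*
 * copy the relevant fields from an in-memory inode into a log tree
 * inode item, batching the leaf writes through a map token.
 */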
3542 static void fill_inode_item(struct btrfs_trans_handle *trans,
3543 struct extent_buffer *leaf,
3544 struct btrfs_inode_item *item,
3545 struct inode *inode, int log_inode_only, u64 logged_isize)
3548 struct btrfs_map_token token;
3550 btrfs_init_map_token(&token);
3552 if (log_inode_only) {
3553 /* set the generation to zero so the recovery code
3554 * can tell the difference between a log entry made just
3555 * to say 'this inode exists' and one meant to say
3556 * 'update this inode with these values'
3558 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3559 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3561 btrfs_set_token_inode_generation(leaf, item,
3562 BTRFS_I(inode)->generation, &token);
3564 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3567 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3568 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3569 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3570 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3572 btrfs_set_token_timespec_sec(leaf, &item->atime,
3573 inode->i_atime.tv_sec, &token);
3574 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3575 inode->i_atime.tv_nsec, &token);
3577 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3578 inode->i_mtime.tv_sec, &token);
3579 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3580 inode->i_mtime.tv_nsec, &token);
3582 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3583 inode->i_ctime.tv_sec, &token);
3584 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3585 inode->i_ctime.tv_nsec, &token);
3587 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), &token);
3590 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3591 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3592 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3593 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3594 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
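/*
 * insert the inode item for 'inode' into the log tree (an existing item
 * is simply overwritten, which is why -EEXIST is tolerated) and fill it
 * from the in-memory inode.
 */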
3597 static int log_inode_item(struct btrfs_trans_handle *trans,
3598 struct btrfs_root *log, struct btrfs_path *path,
3599 struct inode *inode)
3601 struct btrfs_inode_item *inode_item;
3604 ret = btrfs_insert_empty_item(trans, log, path,
3605 &BTRFS_I(inode)->location,
3606 sizeof(*inode_item));
3607 if (ret && ret != -EEXIST)
3609 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3610 struct btrfs_inode_item);
3611 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
3612 btrfs_release_path(path);
3616 static noinline int copy_items(struct btrfs_trans_handle *trans,
3617 struct inode *inode,
3618 struct btrfs_path *dst_path,
3619 struct btrfs_path *src_path, u64 *last_extent,
3620 int start_slot, int nr, int inode_only, u64 logged_isize)
3623 unsigned long src_offset;
3624 unsigned long dst_offset;
3625 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3626 struct btrfs_file_extent_item *extent;
3627 struct btrfs_inode_item *inode_item;
3628 struct extent_buffer *src = src_path->nodes[0];
3629 struct btrfs_key first_key, last_key, key;
3631 struct btrfs_key *ins_keys;
3635 struct list_head ordered_sums;
3636 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3637 bool has_extents = false;
3638 bool need_find_last_extent = true;
3641 INIT_LIST_HEAD(&ordered_sums);
3643 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3644 nr * sizeof(u32), GFP_NOFS);
3648 first_key.objectid = (u64)-1;
3650 ins_sizes = (u32 *)ins_data;
3651 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3653 for (i = 0; i < nr; i++) {
3654 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3655 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3657 ret = btrfs_insert_empty_items(trans, log, dst_path,
3658 ins_keys, ins_sizes, nr);
3664 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3665 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3666 dst_path->slots[0]);
3668 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3670 if ((i == (nr - 1)))
3671 last_key = ins_keys[i];
3673 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3674 inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_path->slots[0],
3676 struct btrfs_inode_item);
3677 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3678 inode, inode_only == LOG_INODE_EXISTS, logged_isize);
3681 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3682 src_offset, ins_sizes[i]);
3686 * We set need_find_last_extent here in case we know we were
3687 * processing other items and then walk into the first extent in
3688 * the inode. If we don't hit an extent then nothing changes,
3689 * we'll do the last search the next time around.
3691 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3693 if (first_key.objectid == (u64)-1)
3694 first_key = ins_keys[i];
3696 need_find_last_extent = false;
3699 /* take a reference on file data extents so that truncates
3700 * or deletes of this inode don't have to relog the inode
3703 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3706 extent = btrfs_item_ptr(src, start_slot + i,
3707 struct btrfs_file_extent_item);
3709 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3712 found_type = btrfs_file_extent_type(src, extent);
3713 if (found_type == BTRFS_FILE_EXTENT_REG) {
3715 ds = btrfs_file_extent_disk_bytenr(src, extent);
3717 /* ds == 0 is a hole */
3721 dl = btrfs_file_extent_disk_num_bytes(src, extent);
3723 cs = btrfs_file_extent_offset(src, extent);
3724 cl = btrfs_file_extent_num_bytes(src, extent);
3726 if (btrfs_file_extent_compression(src, extent)) {
3728 cs = 0;
3729 cl = dl;
3730 }
3732 ret = btrfs_lookup_csums_range(
3733 log->fs_info->csum_root,
3734 ds + cs, ds + cs + cl - 1, &ordered_sums, 0);
3737 btrfs_release_path(dst_path);
3745 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3746 btrfs_release_path(dst_path);
3750 * we have to do this after the loop above to avoid changing the
3751 * log tree while trying to change the log tree.
3754 while (!list_empty(&ordered_sums)) {
3755 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3756 struct btrfs_ordered_sum, list);
3759 ret = btrfs_csum_file_blocks(trans, log, sums);
3760 list_del(&sums->list);
3767 if (need_find_last_extent && *last_extent == first_key.offset) {
3769 * We don't have any leaves between our current one and the one
3770 * we processed before that can have file extent items for our
3771 * inode (and have a generation number smaller than our current
3772 * transaction id).
3773 */
3774 need_find_last_extent = false;
3778 * Because we use btrfs_search_forward we could skip leaves that were
3779 * not modified and then assume *last_extent is valid when it really
3780 * isn't. So back up to the previous leaf and read the end of the last
3781 * extent before we go and fill in holes.
3783 if (need_find_last_extent) {
3786 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3791 if (src_path->slots[0])
3792 src_path->slots[0]--;
3793 src = src_path->nodes[0];
3794 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3795 if (key.objectid != btrfs_ino(inode) ||
3796 key.type != BTRFS_EXTENT_DATA_KEY)
3798 extent = btrfs_item_ptr(src, src_path->slots[0],
3799 struct btrfs_file_extent_item);
3800 if (btrfs_file_extent_type(src, extent) ==
3801 BTRFS_FILE_EXTENT_INLINE) {
3802 len = btrfs_file_extent_inline_len(src, src_path->slots[0], extent);
3805 *last_extent = ALIGN(key.offset + len, log->sectorsize);
3808 len = btrfs_file_extent_num_bytes(src, extent);
3809 *last_extent = key.offset + len;
3813 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3814 * things could have happened
3816 * 1) A merge could have happened, so we could currently be on a leaf
3817 * that holds what we were copying in the first place.
3818 * 2) A split could have happened, and now not all of the items we want
3819 * are on the same leaf.
3821 * So we need to adjust how we search for holes, we need to drop the
3822 * path and re-search for the first extent key we found, and then walk
3823 * forward until we hit the last one we copied.
3825 if (need_find_last_extent) {
3826 /* btrfs_prev_leaf could return 1 without releasing the path */
3827 btrfs_release_path(src_path);
3828 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key, src_path, 0, 0);
3833 src = src_path->nodes[0];
3834 i = src_path->slots[0];
3840 * Ok, so here we need to go through and fill in any holes we may have,
3841 * to make sure that holes are punched for those areas in case they had
3842 * extents previously.
3848 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3849 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3853 src = src_path->nodes[0];
3855 need_find_last_extent = true;
3858 btrfs_item_key_to_cpu(src, &key, i);
3859 if (!btrfs_comp_cpu_keys(&key, &last_key))
3861 if (key.objectid != btrfs_ino(inode) ||
3862 key.type != BTRFS_EXTENT_DATA_KEY) {
3866 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3867 if (btrfs_file_extent_type(src, extent) ==
3868 BTRFS_FILE_EXTENT_INLINE) {
3869 len = btrfs_file_extent_inline_len(src, i, extent);
3870 extent_end = ALIGN(key.offset + len, log->sectorsize);
3872 len = btrfs_file_extent_num_bytes(src, extent);
3873 extent_end = key.offset + len;
3877 if (*last_extent == key.offset) {
3878 *last_extent = extent_end;
3881 offset = *last_extent;
3882 len = key.offset - *last_extent;
3883 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3884 offset, 0, 0, len, 0, len, 0, 0, 0);
3888 *last_extent = extent_end;
3891 * Need to let the callers know we dropped the path so they should
3892 * re-search.
3893 */
3894 if (!ret && need_find_last_extent)
3895 ret = 1;
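/* list_sort() comparator: order extent maps by their start offset */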
3899 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3901 struct extent_map *em1, *em2;
3903 em1 = list_entry(a, struct extent_map, list);
3904 em2 = list_entry(b, struct extent_map, list);
3906 if (em1->start < em2->start)
3907 return -1;
3908 else if (em1->start > em2->start)
3909 return 1;
3910 return 0;
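/*
 * wait for writeback of every ordered extent overlapping the given
 * extent map and copy the relevant checksums into the log tree. On an
 * IO error, *ordered_io_error is set so the caller can fall back to a
 * full transaction commit instead of trusting the log.
 */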
3913 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3914 struct inode *inode,
3915 struct btrfs_root *root,
3916 const struct extent_map *em,
3917 const struct list_head *logged_list,
3918 bool *ordered_io_error)
3920 struct btrfs_ordered_extent *ordered;
3921 struct btrfs_root *log = root->log_root;
3922 u64 mod_start = em->mod_start;
3923 u64 mod_len = em->mod_len;
3924 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3927 LIST_HEAD(ordered_sums);
3930 *ordered_io_error = false;
3932 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3933 em->block_start == EXTENT_MAP_HOLE)
3937 * Wait for any ordered extent that covers our extent map. If it
3938 * finishes without an error, first check and see if our csums are on
3939 * our outstanding ordered extents.
3941 list_for_each_entry(ordered, logged_list, log_list) {
3942 struct btrfs_ordered_sum *sum;
3947 if (ordered->file_offset + ordered->len <= mod_start ||
3948 mod_start + mod_len <= ordered->file_offset)
3951 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3952 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3953 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3954 const u64 start = ordered->file_offset;
3955 const u64 end = ordered->file_offset + ordered->len - 1;
3957 WARN_ON(ordered->inode != inode);
3958 filemap_fdatawrite_range(inode->i_mapping, start, end);
3961 wait_event(ordered->wait,
3962 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3963 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3965 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3967 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3968 * i_mapping flags, so that the next fsync won't get
3969 * an outdated io error too.
3971 btrfs_inode_check_errors(inode);
3972 *ordered_io_error = true;
3976 * We are going to copy all the csums on this ordered extent, so
3977 * go ahead and adjust mod_start and mod_len in case this
3978 * ordered extent has already been logged.
3980 if (ordered->file_offset > mod_start) {
3981 if (ordered->file_offset + ordered->len >=
3982 mod_start + mod_len)
3983 mod_len = ordered->file_offset - mod_start;
3985 * If we have this case
3987 * |--------- logged extent ---------|
3988 * |----- ordered extent ----|
3990 * Just don't mess with mod_start and mod_len, we'll
3991 * just end up logging more csums than we need and it
3992 * will be ok.
3993 */
3995 if (ordered->file_offset + ordered->len <
3996 mod_start + mod_len) {
3997 mod_len = (mod_start + mod_len) -
3998 (ordered->file_offset + ordered->len);
3999 mod_start = ordered->file_offset + ordered->len;
4010 * To keep us from looping for the above case of an ordered
4011 * extent that falls inside of the logged extent.
4013 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4014 continue;
4017 list_for_each_entry(sum, &ordered->list, list) {
4018 ret = btrfs_csum_file_blocks(trans, log, sum);
4024 if (*ordered_io_error || !mod_len || ret || skip_csum)
4027 if (em->compress_type) {
4028 csum_offset = 0;
4029 csum_len = max(em->block_len, em->orig_block_len);
4030 } else {
4031 csum_offset = mod_start - em->start;
4032 csum_len = mod_len;
4033 }
4035 /* block start is already adjusted for the file extent offset. */
4036 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
4037 em->block_start + csum_offset,
4038 em->block_start + csum_offset +
4039 csum_len - 1, &ordered_sums, 0);
4043 while (!list_empty(&ordered_sums)) {
4044 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4045 struct btrfs_ordered_sum, list);
4048 ret = btrfs_csum_file_blocks(trans, log, sums);
4049 list_del(&sums->list);
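/*
 * log a single extent map: wait for its ordered IO and csums, drop any
 * overlapping extent items previously logged for the range, then insert
 * a matching file extent item into the log tree.
 */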
4056 static int log_one_extent(struct btrfs_trans_handle *trans,
4057 struct inode *inode, struct btrfs_root *root,
4058 const struct extent_map *em,
4059 struct btrfs_path *path,
4060 const struct list_head *logged_list,
4061 struct btrfs_log_ctx *ctx)
4063 struct btrfs_root *log = root->log_root;
4064 struct btrfs_file_extent_item *fi;
4065 struct extent_buffer *leaf;
4066 struct btrfs_map_token token;
4067 struct btrfs_key key;
4068 u64 extent_offset = em->start - em->orig_start;
4071 int extent_inserted = 0;
4072 bool ordered_io_err = false;
4074 ret = wait_ordered_extents(trans, inode, root, em, logged_list, &ordered_io_err);
4079 if (ordered_io_err) {
4084 btrfs_init_map_token(&token);
4086 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
4087 em->start + em->len, NULL, 0, 1,
4088 sizeof(*fi), &extent_inserted);
4092 if (!extent_inserted) {
4093 key.objectid = btrfs_ino(inode);
4094 key.type = BTRFS_EXTENT_DATA_KEY;
4095 key.offset = em->start;
4097 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
4102 leaf = path->nodes[0];
4103 fi = btrfs_item_ptr(leaf, path->slots[0],
4104 struct btrfs_file_extent_item);
4106 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, &token);
4108 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4109 btrfs_set_token_file_extent_type(leaf, fi,
4110 BTRFS_FILE_EXTENT_PREALLOC, &token);
4113 btrfs_set_token_file_extent_type(leaf, fi,
4114 BTRFS_FILE_EXTENT_REG, &token);
4117 block_len = max(em->block_len, em->orig_block_len);
4118 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4119 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, em->block_start, &token);
4122 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, &token);
4124 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4125 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4127 em->block_start - extent_offset, &token);
4128 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, &token);
4131 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4132 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, &token);
4136 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4137 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4138 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4139 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, &token);
4141 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4142 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4143 btrfs_mark_buffer_dirty(leaf);
4145 btrfs_release_path(path);
4150 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4151 struct btrfs_root *root,
4152 struct inode *inode,
4153 struct btrfs_path *path,
4154 struct list_head *logged_list,
4155 struct btrfs_log_ctx *ctx)
4157 struct extent_map *em, *n;
4158 struct list_head extents;
4159 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
4164 INIT_LIST_HEAD(&extents);
4166 write_lock(&tree->lock);
4167 test_gen = root->fs_info->last_trans_committed;
4169 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4170 list_del_init(&em->list);
4173 * Just an arbitrary number, this can be really CPU intensive
4174 * once we start getting a lot of extents, and really once we
4175 * have a bunch of extents we just want to commit since it will
4176 * be faster.
4177 */
4178 if (++num > 32768) {
4179 list_del_init(&tree->modified_extents);
4184 if (em->generation <= test_gen)
4186 /* Need a ref to keep it from getting evicted from cache */
4187 atomic_inc(&em->refs);
4188 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4189 list_add_tail(&em->list, &extents);
4193 list_sort(NULL, &extents, extent_cmp);
4196 while (!list_empty(&extents)) {
4197 em = list_entry(extents.next, struct extent_map, list);
4199 list_del_init(&em->list);
4202 * If we had an error we just need to delete everybody from our
4203 * private list.
4204 */
4206 clear_em_logging(tree, em);
4207 free_extent_map(em);
4211 write_unlock(&tree->lock);
4213 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4215 write_lock(&tree->lock);
4216 clear_em_logging(tree, em);
4217 free_extent_map(em);
4219 WARN_ON(!list_empty(&extents));
4220 write_unlock(&tree->lock);
4222 btrfs_release_path(path);
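/*
 * look up the inode item already present in the log tree, if any, and
 * return the i_size recorded there in *size_ret.
 */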
4226 static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4227 struct btrfs_path *path, u64 *size_ret)
4229 struct btrfs_key key;
4232 key.objectid = btrfs_ino(inode);
4233 key.type = BTRFS_INODE_ITEM_KEY;
4236 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4239 } else if (ret > 0) {
4242 struct btrfs_inode_item *item;
4244 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4245 struct btrfs_inode_item);
4246 *size_ret = btrfs_inode_size(path->nodes[0], item);
4249 btrfs_release_path(path);
4254 * At the moment we always log all xattrs. This is to figure out at log replay
4255 * time which xattrs must have their deletion replayed. If an xattr is missing
4256 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4257 * because if a xattr is deleted, the inode is fsynced and a power failure
4258 * happens, causing the log to be replayed the next time the fs is mounted,
4259 * we want the xattr to not exist anymore (same behaviour as other filesystems
4260 * with a journal, ext3/4, xfs, f2fs, etc).
4262 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4263 struct btrfs_root *root,
4264 struct inode *inode,
4265 struct btrfs_path *path,
4266 struct btrfs_path *dst_path)
4269 struct btrfs_key key;
4270 const u64 ino = btrfs_ino(inode);
4275 key.type = BTRFS_XATTR_ITEM_KEY;
4278 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4283 int slot = path->slots[0];
4284 struct extent_buffer *leaf = path->nodes[0];
4285 int nritems = btrfs_header_nritems(leaf);
4287 if (slot >= nritems) {
4289 u64 last_extent = 0;
4291 ret = copy_items(trans, inode, dst_path, path,
4292 &last_extent, start_slot,
4294 /* can't be 1, extent items aren't processed */
4300 ret = btrfs_next_leaf(root, path);
4308 btrfs_item_key_to_cpu(leaf, &key, slot);
4309 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4319 u64 last_extent = 0;
4321 ret = copy_items(trans, inode, dst_path, path,
4322 &last_extent, start_slot,
4324 /* can't be 1, extent items aren't processed */
4334 * If the no holes feature is enabled we need to make sure any hole between the
4335 * last extent and the i_size of our inode is explicitly marked in the log. This
4336 * is to make sure that doing something like:
4338 * 1) create file with 128Kb of data
4339 * 2) truncate file to 64Kb
4340 * 3) truncate file to 256Kb
4341 * 4) fsync file
4342 * 5) <crash/power failure>
4343 * 6) mount fs and trigger log replay
4345 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4346 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4347 * file correspond to a hole. The presence of explicit holes in a log tree is
4348 * what guarantees that log replay will remove/adjust file extent items in the
4351 * Here we do not need to care about holes between extents, that is already done
4352 * by copy_items(). We also only need to do this in the full sync path, where we
4353 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4354 * lookup the list of modified extent maps and if any represents a hole, we
4355 * insert a corresponding extent representing a hole in the log tree.
4357 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4358 struct btrfs_root *root,
4359 struct inode *inode,
4360 struct btrfs_path *path)
4363 struct btrfs_key key;
4366 struct extent_buffer *leaf;
4367 struct btrfs_root *log = root->log_root;
4368 const u64 ino = btrfs_ino(inode);
4369 const u64 i_size = i_size_read(inode);
4371 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
4375 key.type = BTRFS_EXTENT_DATA_KEY;
4376 key.offset = (u64)-1;
4378 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4383 ASSERT(path->slots[0] > 0);
4385 leaf = path->nodes[0];
4386 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4388 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4389 /* inode does not have any extents */
4393 struct btrfs_file_extent_item *extent;
4397 * If there's an extent beyond i_size, an explicit hole was
4398 * already inserted by copy_items().
4400 if (key.offset >= i_size)
4403 extent = btrfs_item_ptr(leaf, path->slots[0],
4404 struct btrfs_file_extent_item);
4406 if (btrfs_file_extent_type(leaf, extent) ==
4407 BTRFS_FILE_EXTENT_INLINE) {
4408 len = btrfs_file_extent_inline_len(leaf, path->slots[0], extent);
4411 ASSERT(len == i_size);
4415 len = btrfs_file_extent_num_bytes(leaf, extent);
4416 /* Last extent goes beyond i_size, no need to log a hole. */
4417 if (key.offset + len > i_size)
4419 hole_start = key.offset + len;
4420 hole_size = i_size - hole_start;
4422 btrfs_release_path(path);
4424 /* Last extent ends at i_size. */
4428 hole_size = ALIGN(hole_size, root->sectorsize);
4429 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4430 hole_size, 0, hole_size, 0, 0, 0);
4435 * When we are logging a new inode X, check if it doesn't have a reference that
4436 * matches the reference from some other inode Y created in a past transaction
4437 * and that was renamed in the current transaction. If we don't do this, then at
4438 * log replay time we can lose inode Y (and all its files if it's a directory):
4440 * mkdir /mnt/x
4441 * echo "hello world" > /mnt/x/foobar
4442 * sync
4443 * rm -rf /mnt/x
4444 * mkdir /mnt/x # or touch /mnt/x
4445 * xfs_io -c fsync /mnt/x
4446 * <power fail>
4447 * mount fs, trigger log replay
4449 * After the log replay procedure, we would lose the first directory and all its
4450 * files (file foobar).
4451 * For the case where inode Y is not a directory we simply end up losing it:
4453 * echo "123" > /mnt/foo
4454 * sync
4455 * mv /mnt/foo /mnt/bar
4456 * echo "abc" > /mnt/foo
4457 * xfs_io -c fsync /mnt/foo
4458 * <power fail>
4460 * We also need this for cases where a snapshot entry is replaced by some other
4461 * entry (file or directory) otherwise we end up with an unreplayable log due to
4462 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4463 * if it were a regular entry:
4466 * btrfs subvolume snapshot /mnt /mnt/x/snap
4467 * btrfs subvolume delete /mnt/x/snap
4468 * rmdir /mnt/x
4469 * mkdir /mnt/x
4470 * fsync /mnt/x or fsync some new file inside it
4473 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4474 * the same transaction.
4476 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4478 const struct btrfs_key *key,
4479 struct inode *inode)
4482 struct btrfs_path *search_path;
4485 u32 item_size = btrfs_item_size_nr(eb, slot);
4487 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4489 search_path = btrfs_alloc_path();
4492 search_path->search_commit_root = 1;
4493 search_path->skip_locking = 1;
4495 while (cur_offset < item_size) {
4499 unsigned long name_ptr;
4500 struct btrfs_dir_item *di;
4502 if (key->type == BTRFS_INODE_REF_KEY) {
4503 struct btrfs_inode_ref *iref;
4505 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4506 parent = key->offset;
4507 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4508 name_ptr = (unsigned long)(iref + 1);
4509 this_len = sizeof(*iref) + this_name_len;
4511 struct btrfs_inode_extref *extref;
4513 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
4515 parent = btrfs_inode_extref_parent(eb, extref);
4516 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4517 name_ptr = (unsigned long)&extref->name;
4518 this_len = sizeof(*extref) + this_name_len;
4521 if (this_name_len > name_len) {
4524 new_name = krealloc(name, this_name_len, GFP_NOFS);
4529 name_len = this_name_len;
4533 read_extent_buffer(eb, name, name_ptr, this_name_len);
4534 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4535 search_path, parent,
4536 name, this_name_len, 0);
4537 if (di && !IS_ERR(di)) {
4540 } else if (IS_ERR(di)) {
4544 btrfs_release_path(search_path);
4546 cur_offset += this_len;
4550 btrfs_free_path(search_path);
4555 /* log a single inode in the tree log.
4556 * At least one parent directory for this inode must exist in the tree
4557 * or be logged already.
4559 * Any items from this inode changed by the current transaction are copied
4560 * to the log tree. An extra reference is taken on any extents in this
4561 * file, allowing us to avoid a whole pile of corner cases around logging
4562 * blocks that have been removed from the tree.
4564 * See LOG_INODE_ALL and related defines for a description of what inode_only
4567 * This handles both files and directories.
4569 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4570 struct btrfs_root *root, struct inode *inode,
4574 struct btrfs_log_ctx *ctx)
4576 struct btrfs_path *path;
4577 struct btrfs_path *dst_path;
4578 struct btrfs_key min_key;
4579 struct btrfs_key max_key;
4580 struct btrfs_root *log = root->log_root;
4581 struct extent_buffer *src = NULL;
4582 LIST_HEAD(logged_list);
4583 u64 last_extent = 0;
4587 int ins_start_slot = 0;
4589 bool fast_search = false;
4590 u64 ino = btrfs_ino(inode);
4591 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4592 u64 logged_isize = 0;
4593 bool need_log_inode_item = true;
4594 bool xattrs_logged = false;
4596 path = btrfs_alloc_path();
4599 dst_path = btrfs_alloc_path();
4601 btrfs_free_path(path);
4605 min_key.objectid = ino;
4606 min_key.type = BTRFS_INODE_ITEM_KEY;
4609 max_key.objectid = ino;
4612 /* today the code can only do partial logging of directories */
4613 if (S_ISDIR(inode->i_mode) ||
4614 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4615 &BTRFS_I(inode)->runtime_flags) &&
4616 inode_only == LOG_INODE_EXISTS))
4617 max_key.type = BTRFS_XATTR_ITEM_KEY;
4619 max_key.type = (u8)-1;
4620 max_key.offset = (u64)-1;
4623 * Only run delayed items if we are a dir or a new file.
4624 * Otherwise commit the delayed inode only, which is needed in
4625 * order for the log replay code to mark inodes for link count
4626 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4628 if (S_ISDIR(inode->i_mode) ||
4629 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
4630 ret = btrfs_commit_inode_delayed_items(trans, inode);
4632 ret = btrfs_commit_inode_delayed_inode(inode);
4635 btrfs_free_path(path);
4636 btrfs_free_path(dst_path);
4640 mutex_lock(&BTRFS_I(inode)->log_mutex);
4642 btrfs_get_logged_extents(inode, &logged_list, start, end);
4645 * a brute force approach to making sure we get the most uptodate
4646 * copies of everything.
4648 if (S_ISDIR(inode->i_mode)) {
4649 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4651 if (inode_only == LOG_INODE_EXISTS)
4652 max_key_type = BTRFS_XATTR_ITEM_KEY;
4653 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4655 if (inode_only == LOG_INODE_EXISTS) {
4657 * Make sure the new inode item we write to the log has
4658 * the same isize as the current one (if it exists).
4659 * This is necessary to prevent data loss after log
4660 * replay, and also to prevent doing a wrong expanding
4661 * truncate - e.g. create file, write 4K into offset
4662 * 0, fsync, write 4K into offset 4096, add hard link,
4663 * fsync some other file (to sync log), power fail - if
4664 * we use the inode's current i_size, after log replay
4665 * we get a 8Kb file, with the last 4Kb extent as a hole
4666 * (zeroes), as if an expanding truncate happened,
4667 * instead of getting a file of 4Kb only.
4669 err = logged_inode_size(log, inode, path,
4674 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4675 &BTRFS_I(inode)->runtime_flags)) {
4676 if (inode_only == LOG_INODE_EXISTS) {
4677 max_key.type = BTRFS_XATTR_ITEM_KEY;
4678 ret = drop_objectid_items(trans, log, path, ino, max_key.type);
4681 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4682 &BTRFS_I(inode)->runtime_flags);
4683 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4684 &BTRFS_I(inode)->runtime_flags);
4686 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
4692 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4693 &BTRFS_I(inode)->runtime_flags) ||
4694 inode_only == LOG_INODE_EXISTS) {
4695 if (inode_only == LOG_INODE_ALL)
4697 max_key.type = BTRFS_XATTR_ITEM_KEY;
4698 ret = drop_objectid_items(trans, log, path, ino, max_key.type);
4701 if (inode_only == LOG_INODE_ALL)
4714 ret = btrfs_search_forward(root, &min_key,
4715 path, trans->transid);
4719 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4720 if (min_key.objectid != ino)
4722 if (min_key.type > max_key.type)
4725 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4726 need_log_inode_item = false;
4728 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4729 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4730 BTRFS_I(inode)->generation == trans->transid) {
4731 ret = btrfs_check_ref_name_override(path->nodes[0], path->slots[0], &min_key, inode);
4737 } else if (ret > 0) {
4739 btrfs_set_log_full_commit(root->fs_info, trans);
4744 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4745 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4748 ret = copy_items(trans, inode, dst_path, path,
4749 &last_extent, ins_start_slot,
4750 ins_nr, inode_only, logged_isize);
4757 btrfs_release_path(path);
4763 src = path->nodes[0];
4764 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4767 } else if (!ins_nr) {
4768 ins_start_slot = path->slots[0];
4773 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4774 ins_start_slot, ins_nr, inode_only, logged_isize);
4782 btrfs_release_path(path);
4786 ins_start_slot = path->slots[0];
4789 nritems = btrfs_header_nritems(path->nodes[0]);
4791 if (path->slots[0] < nritems) {
4792 btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]);
4797 ret = copy_items(trans, inode, dst_path, path,
4798 &last_extent, ins_start_slot,
4799 ins_nr, inode_only, logged_isize);
4807 btrfs_release_path(path);
4809 if (min_key.offset < (u64)-1) {
4811 } else if (min_key.type < max_key.type) {
4819 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4820 ins_start_slot, ins_nr, inode_only, logged_isize);
4830 btrfs_release_path(path);
4831 btrfs_release_path(dst_path);
4832 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4835 xattrs_logged = true;
4836 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4837 btrfs_release_path(path);
4838 btrfs_release_path(dst_path);
4839 err = btrfs_log_trailing_hole(trans, root, inode, path);
4844 btrfs_release_path(path);
4845 btrfs_release_path(dst_path);
4846 if (need_log_inode_item) {
4847 err = log_inode_item(trans, log, dst_path, inode);
4848 if (!err && !xattrs_logged) {
4849 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4851 btrfs_release_path(path);
4858 * Some ordered extents started by fsync might have completed
4859 * before we collected the ordered extents in logged_list, which
4860 * means they're gone, not in our logged_list nor in the inode's
4861 * ordered tree. We want the application/user space to know an
4862 * error happened while attempting to persist file data so that
4863 * it can take proper action. If such error happened, we leave
4864 * without writing to the log tree and the fsync must report the
4865 * file data write error and not commit the current transaction.
4867 err = btrfs_inode_check_errors(inode);
4872 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, &logged_list, ctx);
4878 } else if (inode_only == LOG_INODE_ALL) {
4879 struct extent_map *em, *n;
4881 write_lock(&em_tree->lock);
4883 * We can't just remove every em if we're called for a ranged
4884 * fsync - that is, one that doesn't cover the whole possible
4885 * file range (0 to LLONG_MAX). This is because we can have
4886 * em's that fall outside the range we're logging and therefore
4887 * their ordered operations haven't completed yet
4888 * (btrfs_finish_ordered_io() not invoked yet). This means we
4889 * didn't get their respective file extent item in the fs/subvol
4890 * tree yet, and need to let the next fast fsync (one which
4891 * consults the list of modified extent maps) find the em so
4892 * that it logs a matching file extent item and waits for the
4893 * respective ordered operation to complete (if it's still
4896 * Removing every em outside the range we're logging would make
4897 * the next fast fsync not log their matching file extent items,
4898 * therefore making us lose data after a log replay.
4900 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4902 const u64 mod_end = em->mod_start + em->mod_len - 1;
4904 if (em->mod_start >= start && mod_end <= end)
4905 list_del_init(&em->list);
4907 write_unlock(&em_tree->lock);
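		/*
		 * Worked example (illustrative, not from the original source):
		 * for a ranged fsync of [0, 64K) and a modified extent map
		 * with mod_start == 128K and mod_len == 64K, the em lies
		 * entirely outside the logged range, so it is kept on
		 * ->modified_extents and the next fast fsync can still log
		 * its file extent item once its ordered IO finishes. Only
		 * ems satisfying mod_start >= start && mod_end <= end are
		 * dropped from the list here.
		 */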
	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->logged_trans = trans->transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
	spin_unlock(&BTRFS_I(inode)->lock);

	btrfs_put_logged_extents(&logged_list);
	btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
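/*
 * Context (hedged sketch, not part of this file): the logged_trans and
 * last_log_commit values stored above are what btrfs_inode_in_log() tests
 * so that a later fsync in the same transaction can skip logging. A minimal
 * sketch of that check, assuming only the btrfs_inode fields used in this
 * function, could look like:
 *
 *	static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
 *	{
 *		if (BTRFS_I(inode)->logged_trans == generation &&
 *		    BTRFS_I(inode)->last_sub_trans <=
 *					BTRFS_I(inode)->last_log_commit)
 *			return 1;
 *		return 0;
 *	}
 *
 * The real helper lives in btrfs_inode.h and applies additional conditions.
 */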
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories along it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
		inode = d_inode(parent);
	/*
	 * If we are logging a directory then we start with our inode,
	 * not our parent's inode, so we need to skip setting the
	 * logged_trans so that further down in the log code we don't
	 * think this inode has already been logged.
	 */
	if (inode != orig_inode)
		BTRFS_I(inode)->logged_trans = trans->transid;
		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			btrfs_set_log_full_commit(root->fs_info, trans);

		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)

		if (IS_ROOT(parent))

		parent = dget_parent(parent);
		old_parent = parent;
		inode = d_inode(parent);
struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};
/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *           CPU0                                        CPU1
 *
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 * lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 * that while logging the inode new references (names) are added or removed
 * from the inode, leaving the logged inode item with a link count that does
 * not match the number of logged inode reference items. This is fine because
 * at log replay time we compute the real number of links and correct the
 * link count in the inode item (see replay_one_buffer() and
 * link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
 * has a size that doesn't match the sum of the lengths of all the logged
 * names. This does not result in a problem because if a dir_item key is
 * logged but its matching dir_index key is not logged, at log replay time we
 * don't use it to replay the respective name (see replay_one_name()). On the
 * other hand if only the dir_index key ends up being logged, the respective
 * name is added to the fs/subvol tree with both the dir_item and dir_index
 * keys created (see replay_one_name()).
 * The directory's inode item with a wrong i_size is not a problem as well,
 * since we don't use it at log replay time to set the i_size in the inode
 * item of the fs/subvol tree (see overwrite_item()).
 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;

	path = btrfs_alloc_path();

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
		btrfs_free_path(path);
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);
	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;

		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
			goto next_dir_inode;
		} else if (ret > 0) {
			goto next_dir_inode;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)

			di_inode = btrfs_iget(root->fs_info->sb, &di_key,
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;

			if (btrfs_inode_in_log(di_inode, trans->transid)) {
				btrfs_add_delayed_iput(di_inode);

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR)
				log_mode = LOG_INODE_ALL;
			btrfs_release_path(path);
			ret = btrfs_log_inode(trans, root, di_inode,
					      log_mode, 0, LLONG_MAX, ctx);
			btrfs_add_delayed_iput(di_inode);
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
				if (!new_dir_elem) {
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
		}
		ret = btrfs_next_leaf(log, path);
			goto next_dir_inode;
		} else if (ret > 0) {
			goto next_dir_inode;

		if (min_key.offset < (u64)-1) {

		list_del(&dir_elem->list);

	btrfs_free_path(path);
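/*
 * Illustrative note (not in the original source): although the comment
 * above describes a recursive operation, the recursion is flattened into a
 * breadth-first walk where dir_list acts as a FIFO queue of directory
 * inode numbers. In pseudocode, with hypothetical helper names:
 *
 *	enqueue(btrfs_ino(start_inode));
 *	while (queue is not empty) {
 *		dir = head of queue;
 *		for each dentry logged under dir:
 *			log the inode it points to;
 *			if it is a directory with new dentries, enqueue it;
 *		dequeue and free dir;
 *	}
 *
 * so the loop terminates once no logged directory reports new dentries
 * through ctx->log_new_dentries.
 */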
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.type = BTRFS_INODE_REF_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
				inode_key.objectid = btrfs_inode_extref_parent(
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
			/*
			 * If the parent inode was deleted, return an error to
			 * fall back to a transaction commit. This is to prevent
			 * getting an inode that was moved from one parent A to
			 * a parent B, got its former parent A deleted and then
			 * it got fsync'ed, from existing at both parents after
			 * a log replay (and the old parent still existing).
			 *
			 * mv /mnt/B/bar /mnt/A/bar
			 * mv -T /mnt/A /mnt/B
			 *
			 * If we ignore the old parent B which got deleted,
			 * after a log replay we would have file bar linked
			 * at both parents and the old parent B would still
			 * exist.
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);

			ret = btrfs_log_inode(trans, root, dir_inode,
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			btrfs_add_delayed_iput(dir_inode);

	btrfs_free_path(path);
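/*
 * Layout reminder (hedged, based on the btrfs on-disk format of this era):
 * for BTRFS_INODE_REF_KEY items the parent directory's objectid is simply
 * the item's key.offset, which is why the non-extref branch above reads
 * key.offset directly and consumes the whole item. BTRFS_INODE_EXTREF_KEY
 * items, used once an inode has more hard links than fit in a regular ref
 * item, carry the parent inside the payload instead:
 *
 *	struct btrfs_inode_extref {
 *		__le64 parent_objectid;    (read via btrfs_inode_extref_parent())
 *		__le64 index;
 *		__le16 name_len;
 *		__u8 name[0];
 *	} __attribute__ ((__packed__));
 *
 * hence cur_offset advances by sizeof(*extref) plus the name length.
 */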
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. A minimal, inode-and-backref-
 * only logging is done for any parent directories that are newer than
 * the last committed transaction.
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct dentry *parent,
				  struct btrfs_log_ctx *ctx)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	u64 last_committed = root->fs_info->last_trans_committed;
	bool log_dentries = false;
	struct inode *orig_inode = inode;
	if (btrfs_test_opt(root, NOTREELOG)) {

	/*
	 * The previous transaction commit didn't complete; we need to do a
	 * full commit ourselves.
	 */
	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;

	ret = start_log_trans(trans, root, ctx);
	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {

	if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;
	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in the old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * ln testdir/foo testdir/bar
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 *
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 *
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the file inode using the foo3
	 * name, but the file inode does not have a matching BTRFS_INODE_REF_KEY
	 * item and has a link count of 2.
	 */
	if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
	if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)

	inode = d_inode(parent);
	if (root != BTRFS_I(inode)->root)

	if (BTRFS_I(inode)->generation > last_committed) {
		ret = btrfs_log_inode(trans, root, inode,

	if (IS_ROOT(parent))

	parent = dget_parent(parent);
	old_parent = parent;

	ret = log_new_dir_dentries(trans, root, orig_inode, ctx);

	btrfs_set_log_full_commit(root->fs_info, trans);

	btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
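/*
 * Worked example (illustrative, not from the original source):
 *
 *	mkdir -p /mnt/a/b
 *	touch /mnt/a/b/file
 *	xfs_io -c fsync /mnt/a/b/file
 *
 * The loop above walks file -> b -> a and, because both directories were
 * created in the current transaction (generation > last_committed), each
 * one is logged so that log replay can recreate the full path down to the
 * fsynced file.
 */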
/*
 * It is not safe to log a dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  struct btrfs_log_ctx *ctx)
	struct dentry *parent = dget_parent(dentry);

	ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
				     start, end, 0, ctx);
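/*
 * Hedged usage sketch (modeled on the fsync path; the caller's locals are
 * assumptions): a caller separates the fast path, which only syncs the log
 * tree, from the 0/1 contract documented above:
 *
 *	ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, &ctx);	(fast path)
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);	(full commit)
 *
 * The fsync caller also handles BTRFS_NO_LOG_SYNC, returned further up
 * when the inode is already fully in the log.
 */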
/*
 * should be called during mount to recover and replay any log trees
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
	};

	path = btrfs_alloc_path();
	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);

	ret = walk_log_tree(trans, log_root_tree, &wc);
		btrfs_std_error(fs_info, ret, "Failed to pin buffers while "
				"recovering log root tree.");
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

			btrfs_std_error(fs_info, ret,
					"Couldn't find tree log root.");

		if (path->slots[0] == 0)

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)

		log = btrfs_read_fs_root(log_root_tree, &found_key);
			btrfs_std_error(fs_info, ret,
					"Couldn't read tree log root.");

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;
		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			btrfs_std_error(fs_info, ret, "Couldn't read target root "
					"for tree log recovery.");

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of fs roots probably has changed in case
			 * some inode_items got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * could only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						&root->highest_objectid);

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);

		if (found_key.offset == 0)

	btrfs_release_path(path);
	/* step one is to pin it all, step two is to replay just inodes */
	wc.process_func = replay_one_buffer;
	wc.stage = LOG_WALK_REPLAY_INODES;

	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);

	btrfs_end_transaction(wc.trans, fs_info->tree_root);
	btrfs_free_path(path);
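/*
 * Illustrative summary (not in the original source): recovery drives
 * walk_log_tree() over every log tree once per stage, advancing wc.stage
 * between passes:
 *
 *	pass 1: LOG_WALK_PIN_ONLY      - pin all log tree blocks
 *	pass 2: LOG_WALK_REPLAY_INODES - recreate the logged inodes
 *	later passes: replay dir index items and everything else, up to
 *	              LOG_WALK_REPLAY_ALL
 *
 * after which the transaction commit (step 4 above) unpins the blocks.
 */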
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;
	/*
	 * if this directory was already logged, any new
	 * names for this file/dir will get recorded
	 */
	if (BTRFS_I(dir)->logged_trans == trans->transid)

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */

	/* we can safely do the unlink without any special recording */

	BTRFS_I(dir)->last_unlink_trans = trans->transid;
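/*
 * Cross-reference (illustrative, not in the original source): the transid
 * recorded in last_unlink_trans here is what the logging side tests later,
 * e.g. check_parent_dirs_for_sync() forces a full commit with
 *
 *	if (BTRFS_I(inode)->last_unlink_trans > last_committed)
 *		btrfs_set_log_full_commit(root->fs_info, trans);
 *
 * and btrfs_log_inode_parent() logs all parent directories under the same
 * condition.
 */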
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming it
	 * from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
				      LLONG_MAX, 1, NULL);
}
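/*
 * Hedged usage sketch (assumed caller shape, modeled on the rename path;
 * the dentry variables are assumptions): once the new name is in place,
 * the caller can refresh the log and honour the 0/1 contract above:
 *
 *	struct dentry *parent = new_dentry->d_parent;
 *	int ret;
 *
 *	ret = btrfs_log_new_name(trans, d_inode(old_dentry), old_dir, parent);
 *	if (ret == 1)
 *		(a full transaction commit is required before returning)
 */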