// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>
#include <linux/fsverity.h>

#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "print-tree.h"
#include "accessors.h"
#include "file-item.h"
/*
 * Maximum number of references an extent can have in order for us to attempt to
 * issue clone operations instead of write operations. This currently exists to
 * avoid hitting limitations of the backreference walking code (taking a lot of
 * time and using too much memory for extents with a large number of references).
 */
#define SEND_MAX_EXTENT_REFS	1024
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
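
/*
 * Example: when building a path with a reversed fs_path, each new component
 * is prepended, so after adding "c", then "b" and then "a" the tail of the
 * buffer holds "a/b/c\0" with ->start pointing at 'a'; fs_path_unreverse()
 * then moves the string to the front of the buffer.
 */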
/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;
	u64 num_bytes;
	bool found_ref;
};
#define SEND_CTX_MAX_NAME_CACHE_SIZE		128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE		(SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
/*
 * Limit the root_ids array of struct backref_cache_entry to 12 elements.
 * This makes the size of a cache entry exactly 128 bytes on x86_64.
 * The most common case is to have a single root for cloning, which corresponds
 * to the send root. Having the user specify more than 11 clone roots is not
 * common, and in such rare cases we simply don't use caching if the number of
 * cloning roots that lead down to a leaf is more than 12.
 */
#define SEND_MAX_BACKREF_CACHE_ROOTS	12

/*
 * Max number of entries in the cache.
 * With SEND_MAX_BACKREF_CACHE_ROOTS as 12, the size in bytes, excluding
 * the maple tree's internal nodes, is 16K.
 */
#define SEND_MAX_BACKREF_CACHE_SIZE	128
/*
 * A backref cache entry maps a leaf to the list of IDs of roots from which the
 * leaf is accessible and which we can use for clone operations.
 * With SEND_MAX_BACKREF_CACHE_ROOTS as 12, each cache entry is 128 bytes (on
 * x86_64).
 */
struct backref_cache_entry {
	/* List to link to the cache's lru list. */
	struct list_head list;
	/* The key for this entry in the cache. */
	u64 key;
	u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
	/* Number of valid elements in the root_ids array. */
	int num_roots;
};
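
/*
 * Entries are keyed by the leaf's bytenr right-shifted by
 * fs_info->sectorsize_bits and are evicted in LRU order once the cache holds
 * SEND_MAX_BACKREF_CACHE_SIZE entries. The whole cache is invalidated when a
 * relocation is detected (see lookup_backref_cache() below).
 */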
struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	/*
	 * Whether BTRFS_SEND_A_DATA attribute was already added to current
	 * command (since protocol v2, data must be the last attribute).
	 */
	bool put_data;
	struct page **send_buf_pages;
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
	/* Protocol version compatibility requested */
	u32 proto;

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Keep track of the generation of the last transaction that was used
	 * for relocating a block group. This is periodically checked in order
	 * to detect if a relocation happened since the last check, so that we
	 * don't operate on stale extent buffers for nodes (level >= 1) or on
	 * stale disk_bytenr values of file extent items.
	 */
	u64 last_reloc_trans;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool cur_inode_new;
	bool cur_inode_new_gen;
	bool cur_inode_deleted;
	bool ignore_cur_inode;
	bool cur_inode_needs_verity;
	void *verity_descriptor;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	/*
	 * The inode we are currently processing. It's not NULL only when we
	 * need to issue write commands for data extents from this inode.
	 */
	struct inode *cur_inode;
	struct file_ra_state ra;
	u64 page_cache_clear_start;
	bool clean_page_cache;
	/*
	 * We process inodes in increasing order of their inode numbers, so if
	 * before an incremental send we reverse the parent/child relationship
	 * of directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */
	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;

	struct rb_root rbtree_new_refs;
	struct rb_root rbtree_deleted_refs;

	struct {
		u64 last_reloc_trans;
		struct list_head lru_list;
		struct maple_tree entries;
		/* Number of entries stored in the cache. */
		int size;
	} backref_cache;
};
struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};
struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	u64 rmdir_gen;
	bool orphanized;
};
struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};
struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
#define ADVANCE_ONLY_NEXT	-1
enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}
static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
{
	switch (sctx->proto) {
	case 1: return cmd <= BTRFS_SEND_C_MAX_V1;
	case 2: return cmd <= BTRFS_SEND_C_MAX_V2;
	case 3: return cmd <= BTRFS_SEND_C_MAX_V3;
	default: return false;
	}
}
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
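
/*
 * Holes only need to be written out explicitly for incremental sends of
 * regular files that already existed in the parent snapshot - for new inodes
 * (or inodes that got a new generation) the receiver creates the file from
 * scratch, so there is no stale data to punch out.
 */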
static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}
static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->start[0] = '\0';
		p->end = p->start;
	} else {
		p->start = p->buf;
		p->end = p->start;
		p->start[0] = '\0';
	}
}
static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}
static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}
static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}
static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;
	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * Allocate to the next largest kmalloc bucket size, to let
	 * the fast path happen most of the time.
	 */
	len = kmalloc_size_roundup(len);
	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	p->buf_len = len;

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
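
/*
 * Reserve room for a new path component of @name_len bytes (plus a '/'
 * separator when the path is not empty) and return, in *prepared, the
 * position the caller must copy the component to. For reversed paths the
 * room is made at the front of the string, otherwise at the end.
 */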
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}
static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	p->reversed = from->reversed;
	fs_path_reset(p);

	return fs_path_add_path(p, from);
}
static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);

	p->reversed = 0;
}
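
/*
 * Send always searches the commit roots of read-only trees, so the paths it
 * allocates skip locking and instead rely on holding
 * fs_info->commit_root_sem (hence need_commit_sem below).
 */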
static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (WARN_ON_ONCE(sctx->put_data))
		return -EINVAL;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	put_unaligned_le16(attr, &hdr->tlv_type);
	put_unaligned_le16(len, &hdr->tlv_len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
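
/*
 * Resulting wire format of one attribute, as produced by tlv_put() above
 * (both header fields little endian):
 *
 *	__le16 tlv_type;	attribute id (BTRFS_SEND_A_*)
 *	__le16 tlv_len;		length of the data that follows
 *	u8     data[tlv_len];
 */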
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(8)
TLV_PUT_DEFINE_INT(32)
TLV_PUT_DEFINE_INT(64)
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}
static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(sctx->proto);
	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}
/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le16(cmd, &hdr->cmd);

	return 0;
}
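
/*
 * Finish the current command: fill in the header's length and crc and write
 * the whole buffer to the send file. The crc32c is computed over the entire
 * command (header plus attributes) with the crc field itself set to zero
 * first.
 */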
static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
	put_unaligned_le32(0, &hdr->crc);

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	put_unaligned_le32(crc, &hdr->crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->send_size = 0;
	sctx->put_data = false;

	return ret;
}
/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
struct btrfs_inode_info {
	u64 size;
	u64 gen;
	u64 mode;
	u64 uid;
	u64 gid;
	u64 rdev;
	u64 fileattr;
	u64 nlink;
};
/*
 * Helper function to retrieve some fields from an inode item.
 */
static int get_inode_info(struct btrfs_root *root, u64 ino,
			  struct btrfs_inode_info *info)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	if (!info)
		goto out;

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	info->size = btrfs_inode_size(path->nodes[0], ii);
	info->gen = btrfs_inode_generation(path->nodes[0], ii);
	info->mode = btrfs_inode_mode(path->nodes[0], ii);
	info->uid = btrfs_inode_uid(path->nodes[0], ii);
	info->gid = btrfs_inode_gid(path->nodes[0], ii);
	info->rdev = btrfs_inode_rdev(path->nodes[0], ii);
	info->nlink = btrfs_inode_nlink(path->nodes[0], ii);
	/*
	 * Transfer the unchanged u64 value of btrfs_inode_item::flags, that's
	 * otherwise logically split to 32/32 parts.
	 */
	info->fileattr = btrfs_inode_flags(path->nodes[0], ii);

out:
	btrfs_free_path(path);
	return ret;
}
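
/* Convenience wrapper for callers that only need the inode's generation. */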
static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen)
{
	int ret;
	struct btrfs_inode_info info;

	ASSERT(gen);

	ret = get_inode_info(root, ino, &info);
	if (!ret)
		*gen = info.gen;
	return ret;
}
typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);
/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a nonzero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  void *ctx);
/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a nonzero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, slot);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (btrfs_dir_ftype(eb, di) == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 1;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}
static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}
/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}
struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* The bytenr the file extent item we are processing refers to. */
	u64 bytenr;
	/* The owner (root id) of the data backref for the current extent. */
	u64 backref_owner;
	/* The offset of the data backref for the current extent. */
	u64 backref_offset;
};
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	const struct clone_root *cr = elt;

	if (root < cr->root->root_key.objectid)
		return -1;
	if (root > cr->root->root_key.objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	const struct clone_root *cr1 = e1;
	const struct clone_root *cr2 = e2;

	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
		return -1;
	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
		return 1;
	return 0;
}
/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset.
 */
static int iterate_backrefs(u64 ino, u64 offset, u64 num_bytes, u64 root_id,
			    void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *clone_root;

	/* First check if the root is in the list of accepted clone sources */
	clone_root = bsearch((void *)(uintptr_t)root_id, bctx->sctx->clone_roots,
			     bctx->sctx->clone_roots_cnt,
			     sizeof(struct clone_root),
			     __clone_root_cmp_bsearch);
	if (!clone_root)
		return 0;

	/* This is our own reference, bail out as we can't clone from it. */
	if (clone_root->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (clone_root->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	bctx->found++;
	clone_root->found_ref = true;

	/*
	 * If the given backref refers to a file extent item with a larger
	 * number of bytes than what we found before, use the new one so that
	 * we clone more optimally and end up doing less writes and getting
	 * less exclusive, non-shared extents at the destination.
	 */
	if (num_bytes > clone_root->num_bytes) {
		clone_root->ino = ino;
		clone_root->offset = offset;
		clone_root->num_bytes = num_bytes;

		/*
		 * Found a perfect candidate, so there's no need to continue
		 * backref walking.
		 */
		if (num_bytes >= bctx->extent_len)
			return BTRFS_ITERATE_EXTENT_INODES_STOP;
	}

	return 0;
}
static void empty_backref_cache(struct send_ctx *sctx)
{
	struct backref_cache_entry *entry;
	struct backref_cache_entry *tmp;

	list_for_each_entry_safe(entry, tmp, &sctx->backref_cache.lru_list, list)
		kfree(entry);

	INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
	mtree_destroy(&sctx->backref_cache.entries);
	sctx->backref_cache.size = 0;
}
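
/*
 * Cache lookup callback for backref walking: on a hit it hands back the
 * cached array of root IDs for the given leaf and refreshes the entry's
 * position in the LRU list.
 */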
static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
				 const u64 **root_ids_ret, int *root_count_ret)
{
	struct backref_ctx *bctx = ctx;
	struct send_ctx *sctx = bctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
	struct backref_cache_entry *entry;

	if (sctx->backref_cache.size == 0)
		return false;

	/*
	 * If relocation happened since we first filled the cache, then we must
	 * empty the cache and can not use it, because even though we operate on
	 * read-only roots, their leaves and nodes may have been reallocated and
	 * now be used for different nodes/leaves of the same tree or some other
	 * tree.
	 *
	 * We are called from iterate_extent_inodes() while either holding a
	 * transaction handle or holding fs_info->commit_root_sem, so no need
	 * to take any lock here.
	 */
	if (fs_info->last_reloc_trans > sctx->backref_cache.last_reloc_trans) {
		empty_backref_cache(sctx);
		return false;
	}

	entry = mtree_load(&sctx->backref_cache.entries, key);
	if (!entry)
		return false;

	*root_ids_ret = entry->root_ids;
	*root_count_ret = entry->num_roots;
	list_move_tail(&entry->list, &sctx->backref_cache.lru_list);

	return true;
}
static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
				void *ctx)
{
	struct backref_ctx *bctx = ctx;
	struct send_ctx *sctx = bctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	struct backref_cache_entry *new_entry;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	/*
	 * We're called while holding a transaction handle or while holding
	 * fs_info->commit_root_sem (at iterate_extent_inodes()), so must do a
	 * NOFS allocation.
	 */
	new_entry = kmalloc(sizeof(struct backref_cache_entry), GFP_NOFS);
	/* No worries, cache is optional. */
	if (!new_entry)
		return;

	new_entry->key = leaf_bytenr >> fs_info->sectorsize_bits;
	new_entry->num_roots = 0;
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(root_ids, &uiter)) != NULL) {
		const u64 root_id = node->val;
		struct clone_root *root;

		root = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots,
			       sctx->clone_roots_cnt, sizeof(struct clone_root),
			       __clone_root_cmp_bsearch);
		if (!root)
			continue;

		/* Too many roots, just exit, no worries as caching is optional. */
		if (new_entry->num_roots >= SEND_MAX_BACKREF_CACHE_ROOTS) {
			kfree(new_entry);
			return;
		}

		new_entry->root_ids[new_entry->num_roots] = root_id;
		new_entry->num_roots++;
	}

	/*
	 * We may have not added any roots to the new cache entry, which means
	 * none of the roots is part of the list of roots from which we are
	 * allowed to clone. Cache the new entry as it's still useful to avoid
	 * backref walking to determine which roots have a path to the leaf.
	 */

	if (sctx->backref_cache.size >= SEND_MAX_BACKREF_CACHE_SIZE) {
		struct backref_cache_entry *lru_entry;
		struct backref_cache_entry *mt_entry;

		lru_entry = list_first_entry(&sctx->backref_cache.lru_list,
					     struct backref_cache_entry, list);
		mt_entry = mtree_erase(&sctx->backref_cache.entries, lru_entry->key);
		ASSERT(mt_entry == lru_entry);
		list_del(&mt_entry->list);
		kfree(mt_entry);
		sctx->backref_cache.size--;
	}

	ret = mtree_insert(&sctx->backref_cache.entries, new_entry->key,
			   new_entry, GFP_NOFS);
	ASSERT(ret == 0 || ret == -ENOMEM);
	if (ret) {
		/* Caching is optional, no worries. */
		kfree(new_entry);
		return;
	}

	list_add_tail(&new_entry->list, &sctx->backref_cache.lru_list);

	/*
	 * We are called from iterate_extent_inodes() while either holding a
	 * transaction handle or holding fs_info->commit_root_sem, so no need
	 * to take any lock here.
	 */
	if (sctx->backref_cache.size == 0)
		sctx->backref_cache.last_reloc_trans = fs_info->last_reloc_trans;

	sctx->backref_cache.size++;
}
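
/*
 * Callback invoked on the extent item before any backref walking is done.
 * Returning -ENOENT here makes the caller fall back to write commands
 * without paying the cost of resolving backrefs.
 */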
static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
			     const struct extent_buffer *leaf, void *ctx)
{
	const u64 refs = btrfs_extent_refs(leaf, ei);
	const struct backref_ctx *bctx = ctx;
	const struct send_ctx *sctx = bctx->sctx;

	if (bytenr == bctx->bytenr) {
		const u64 flags = btrfs_extent_flags(leaf, ei);

		if (WARN_ON(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
			return -EUCLEAN;

		/*
		 * If we have only one reference and only the send root as a
		 * clone source - meaning no clone roots were given in the
		 * struct btrfs_ioctl_send_args passed to the send ioctl - then
		 * it's our reference and there's no point in doing backref
		 * walking which is expensive, so exit early.
		 */
		if (refs == 1 && sctx->clone_roots_cnt == 1)
			return -ENOENT;
	}

	/*
	 * Backreference walking (iterate_extent_inodes() below) is currently
	 * too expensive when an extent has a large number of references, both
	 * in time spent and used memory. So for now just fallback to write
	 * operations instead of clone operations when an extent has more than
	 * a certain amount of references.
	 */
	if (refs > SEND_MAX_EXTENT_REFS)
		return -ENOENT;

	return 0;
}
static bool skip_self_data_ref(u64 root, u64 ino, u64 offset, void *ctx)
{
	const struct backref_ctx *bctx = ctx;

	if (ino == bctx->cur_objectid &&
	    root == bctx->backref_owner &&
	    offset == bctx->backref_offset)
		return true;

	return false;
}
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx backref_ctx = { 0 };
	struct btrfs_backref_walk_ctx backref_walk_ctx = { 0 };
	struct clone_root *cur_clone_root;
	int compressed;
	u32 i;

	/*
	 * With fallocate we can get prealloc extents beyond the inode's i_size,
	 * so we don't do anything here because clone operations can not clone
	 * to a range beyond i_size without increasing the i_size of the
	 * destination inode.
	 */
	if (data_offset >= ino_size)
		return 0;

	fi = btrfs_item_ptr(eb, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		return -ENOENT;

	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0)
		return -ENOENT;

	compressed = btrfs_file_extent_compression(eb, fi);
	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->num_bytes = 0;
		cur_clone_root->found_ref = false;
	}

	backref_ctx.sctx = sctx;
	backref_ctx.cur_objectid = ino;
	backref_ctx.cur_offset = data_offset;
	backref_ctx.bytenr = disk_byte;
	/*
	 * Use the header owner and not the send root's id, because in case of a
	 * snapshot we can have shared subtrees.
	 */
	backref_ctx.backref_owner = btrfs_header_owner(eb);
	backref_ctx.backref_offset = data_offset - btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * iterate_backrefs() work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx.extent_len = ino_size - data_offset;
	else
		backref_ctx.extent_len = num_bytes;

	/*
	 * Now collect all backrefs.
	 */
	backref_walk_ctx.bytenr = disk_byte;
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_walk_ctx.extent_item_pos = btrfs_file_extent_offset(eb, fi);
	backref_walk_ctx.fs_info = fs_info;
	backref_walk_ctx.cache_lookup = lookup_backref_cache;
	backref_walk_ctx.cache_store = store_backref_cache;
	backref_walk_ctx.indirect_ref_iterator = iterate_backrefs;
	backref_walk_ctx.check_extent_item = check_extent_item;
	backref_walk_ctx.user_ctx = &backref_ctx;

	/*
	 * If have a single clone root, then it's the send root and we can tell
	 * the backref walking code to skip our own backref and not resolve it,
	 * since we can not use it for cloning - the source and destination
	 * ranges can't overlap and in case the leaf is shared through a subtree
	 * due to snapshots, we can't use those other roots since they are not
	 * in the list of clone roots.
	 */
	if (sctx->clone_roots_cnt == 1)
		backref_walk_ctx.skip_data_ref = skip_self_data_ref;

	ret = iterate_extent_inodes(&backref_walk_ctx, true, iterate_backrefs,
				    &backref_ctx);
	if (ret < 0)
		return ret;

	down_read(&fs_info->commit_root_sem);
	if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
		/*
		 * A transaction commit for a transaction in which block group
		 * relocation was done just happened.
		 * The disk_bytenr of the file extent item we processed is
		 * possibly stale, referring to the extent's location before
		 * relocation. So act as if we haven't found any clone sources
		 * and fallback to write commands, which will read the correct
		 * data from the new extent location. Otherwise we will fail
		 * below because we haven't found our own back reference or we
		 * could be getting incorrect sources in case the old extent
		 * was already reallocated after the relocation.
		 */
		up_read(&fs_info->commit_root_sem);
		return -ENOENT;
	}
	up_read(&fs_info->commit_root_sem);

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx.found) {
		btrfs_debug(fs_info, "no clones found");
		return -ENOENT;
	}

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		struct clone_root *clone_root = &sctx->clone_roots[i];

		if (!clone_root->found_ref)
			continue;

		/*
		 * Choose the root from which we can clone more bytes, to
		 * minimize write operations and therefore have more extent
		 * sharing at the destination (the same as in the source).
		 */
		if (!cur_clone_root ||
		    clone_root->num_bytes > cur_clone_root->num_bytes) {
			cur_clone_root = clone_root;

			/*
			 * We found an optimal clone candidate (any inode from
			 * any root is fine), so we're done.
			 */
			if (clone_root->num_bytes >= backref_ctx.extent_len)
				break;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

	return ret;
}
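
/*
 * Read the target of a symlink - stored as an inline, uncompressed file
 * extent - into @dest.
 */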
static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in between).
		 * Print an informative message to dmesg/syslog so that the user
		 * can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}
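
/*
 * The unique names generated below have the form "o<ino>-<gen>-<idx>", where
 * idx is incremented until the candidate collides with nothing in either the
 * send root or the parent root.
 */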
/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		struct fscrypt_str tmp_name;

		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));
		tmp_name.name = tmp;
		tmp_name.len = strlen(tmp);

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   &tmp_name, 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   &tmp_name, 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};
static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen = 0;
	struct btrfs_inode_info info;

	ret = get_inode_info(sctx->send_root, ino, &info);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = (info.nlink == 0) ? -ENOENT : ret;
	left_gen = info.gen;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, &info);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = (info.nlink == 0) ? -ENOENT : ret;
		right_gen = info.gen;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}
/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;

		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;

		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_gen(root, parent_dir, dir_gen);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}
static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}
/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret;
	u64 gen;
	u64 other_inode = 0;
	struct btrfs_inode_info info;

	if (!sctx->parent_root)
		return 0;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		return 0;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_gen(sctx->parent_root, dir, &gen);
		if (ret < 0 && ret != -ENOENT)
			return ret;
		if (ret)
			return 0;
		if (gen != dir_gen)
			return 0;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
				    &other_inode);
	if (ret < 0 && ret != -ENOENT)
		return ret;
	if (ret)
		return 0;

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, &info);
		if (ret < 0)
			return ret;

		*who_ino = other_inode;
		*who_gen = info.gen;
		*who_mode = info.mode;
		return 1;
	}

	return 0;
}
/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
{
	int ret;
	u64 gen;
	u64 ow_inode;

	if (!sctx->parent_root)
		return 0;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		return ret;

	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_gen(sctx->send_root, dir, &gen);
		if (ret < 0 && ret != -ENOENT)
			return ret;
		if (ret)
			return 0;
		if (gen != dir_gen)
			return 0;
	}

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
				    &ow_inode);
	if (ret < 0 && ret != -ENOENT)
		return ret;
	if (ret) {
		/* was never and will never be overwritten */
		return 0;
	}

	ret = get_inode_gen(sctx->send_root, ow_inode, &gen);
	if (ret < 0)
		return ret;

	if (ow_inode == ino && gen == ino_gen) {
		/* The ref is still pointing at our inode, no overwrite. */
		return 0;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		return 1;

	return 0;
}
/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
				name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}
/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			  nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}
static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}
/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			/*
			 * Removes the entry from the list and adds it back to
			 * the end. This marks the entry as recently used so
			 * that name_cache_clean_unused does not remove it.
			 */
			list_move_tail(&nce->list, &sctx->name_cache_list);

			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * record_new_ref_if_needed().
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
				dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    it.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino, gen)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
			stop = 1;
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}
2612 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2614 static int send_subvol_begin(struct send_ctx *sctx)
2617 struct btrfs_root *send_root = sctx->send_root;
2618 struct btrfs_root *parent_root = sctx->parent_root;
2619 struct btrfs_path *path;
2620 struct btrfs_key key;
2621 struct btrfs_root_ref *ref;
2622 struct extent_buffer *leaf;
2626 path = btrfs_alloc_path();
2630 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2632 btrfs_free_path(path);
2636 key.objectid = send_root->root_key.objectid;
2637 key.type = BTRFS_ROOT_BACKREF_KEY;
2640 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2649 leaf = path->nodes[0];
2650 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2651 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2652 key.objectid != send_root->root_key.objectid) {
2656 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2657 namelen = btrfs_root_ref_name_len(leaf, ref);
2658 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2659 btrfs_release_path(path);
2662 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2666 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2671 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2673 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2674 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2675 sctx->send_root->root_item.received_uuid);
2677 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2678 sctx->send_root->root_item.uuid);
2680 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2681 btrfs_root_ctransid(&sctx->send_root->root_item));
2683 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2684 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2685 parent_root->root_item.received_uuid);
2687 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2688 parent_root->root_item.uuid);
2689 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2690 btrfs_root_ctransid(&sctx->parent_root->root_item));
2693 ret = send_cmd(sctx);
2697 btrfs_free_path(path);
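/*
 * Send a BTRFS_SEND_C_TRUNCATE command to set the size of the given inode.
 */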
2702 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2704 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2708 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2710 p = fs_path_alloc();
2714 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2718 ret = get_cur_path(sctx, ino, gen, p);
2721 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2722 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2724 ret = send_cmd(sctx);
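/*
 * Send a BTRFS_SEND_C_CHMOD command with the inode's permission bits.
 */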
2732 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2734 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2738 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2740 p = fs_path_alloc();
2744 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2748 ret = get_cur_path(sctx, ino, gen, p);
2751 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2752 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2754 ret = send_cmd(sctx);
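/*
 * Send a BTRFS_SEND_C_FILEATTR command. The attribute only exists since
 * protocol v2, so this is a no-op for older streams.
 */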
2762 static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
2764 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2768 if (sctx->proto < 2)
2771 btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr);
2773 p = fs_path_alloc();
2777 ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR);
2781 ret = get_cur_path(sctx, ino, gen, p);
2784 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2785 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);
2787 ret = send_cmd(sctx);
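/*
 * Send a BTRFS_SEND_C_CHOWN command with the inode's uid and gid.
 */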
2795 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2797 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2801 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2804 p = fs_path_alloc();
2808 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2812 ret = get_cur_path(sctx, ino, gen, p);
2815 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2816 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2817 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2819 ret = send_cmd(sctx);
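/*
 * Send a BTRFS_SEND_C_UTIMES command with the inode's timestamps, read
 * directly from its inode item (otime is included for protocol v2+).
 */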
2827 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2829 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2831 struct fs_path *p = NULL;
2832 struct btrfs_inode_item *ii;
2833 struct btrfs_path *path = NULL;
2834 struct extent_buffer *eb;
2835 struct btrfs_key key;
2838 btrfs_debug(fs_info, "send_utimes %llu", ino);
2840 p = fs_path_alloc();
2844 path = alloc_path_for_send();
2851 key.type = BTRFS_INODE_ITEM_KEY;
2853 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2859 eb = path->nodes[0];
2860 slot = path->slots[0];
2861 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2863 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2867 ret = get_cur_path(sctx, ino, gen, p);
2870 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2871 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2872 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2873 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2874 if (sctx->proto >= 2)
2875 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_OTIME, eb, &ii->otime);
2877 ret = send_cmd(sctx);
2882 btrfs_free_path(path);
2887 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2888 * a valid path yet because we have not processed the refs, so the inode
2889 * is created as an orphan.
2891 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2893 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2897 struct btrfs_inode_info info;
2902 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2904 p = fs_path_alloc();
2908 if (ino != sctx->cur_ino) {
2909 ret = get_inode_info(sctx->send_root, ino, &info);
2916 gen = sctx->cur_inode_gen;
2917 mode = sctx->cur_inode_mode;
2918 rdev = sctx->cur_inode_rdev;
2921 if (S_ISREG(mode)) {
2922 cmd = BTRFS_SEND_C_MKFILE;
2923 } else if (S_ISDIR(mode)) {
2924 cmd = BTRFS_SEND_C_MKDIR;
2925 } else if (S_ISLNK(mode)) {
2926 cmd = BTRFS_SEND_C_SYMLINK;
2927 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2928 cmd = BTRFS_SEND_C_MKNOD;
2929 } else if (S_ISFIFO(mode)) {
2930 cmd = BTRFS_SEND_C_MKFIFO;
2931 } else if (S_ISSOCK(mode)) {
2932 cmd = BTRFS_SEND_C_MKSOCK;
2934 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2935 (int)(mode & S_IFMT));
2940 ret = begin_cmd(sctx, cmd);
2944 ret = gen_unique_name(sctx, ino, gen, p);
2948 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2949 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2951 if (S_ISLNK(mode)) {
2953 ret = read_symlink(sctx->send_root, ino, p);
2956 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2957 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2958 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2959 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2960 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2963 ret = send_cmd(sctx);
2975 * We need some special handling for inodes that get processed before their
2976 * parent directory is created. See process_recorded_refs for details.
2977 * This function checks if we already created the dir out of order.
2979 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2983 struct btrfs_path *path = NULL;
2984 struct btrfs_key key;
2985 struct btrfs_key found_key;
2986 struct btrfs_key di_key;
2987 struct btrfs_dir_item *di;
2989 path = alloc_path_for_send();
2994 key.type = BTRFS_DIR_INDEX_KEY;
2997 btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) {
2998 struct extent_buffer *eb = path->nodes[0];
3000 if (found_key.objectid != key.objectid ||
3001 found_key.type != key.type) {
3006 di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item);
3007 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
3009 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
3010 di_key.objectid < sctx->send_progress) {
3015 /* Catch error found during iteration */
3019 btrfs_free_path(path);
3024 * Only creates the inode if it is:
3025 * 1. Not a directory
3026 * 2. Or a directory which was not already created due to out-of-order
3027 * directories. See did_create_dir and process_recorded_refs for details.
3029 static int send_create_inode_if_needed(struct send_ctx *sctx)
3033 if (S_ISDIR(sctx->cur_inode_mode)) {
3034 ret = did_create_dir(sctx, sctx->cur_ino);
3041 return send_create_inode(sctx, sctx->cur_ino);
3044 struct recorded_ref {
3045 struct list_head list;
3047 struct fs_path *full_path;
3051 struct rb_node node;
3052 struct rb_root *root;
3055 static struct recorded_ref *recorded_ref_alloc(void)
3057 struct recorded_ref *ref;
3059 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3062 RB_CLEAR_NODE(&ref->node);
3063 INIT_LIST_HEAD(&ref->list);
3067 static void recorded_ref_free(struct recorded_ref *ref)
3071 if (!RB_EMPTY_NODE(&ref->node))
3072 rb_erase(&ref->node, ref->root);
3073 list_del(&ref->list);
3074 fs_path_free(ref->full_path);
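/* Set the ref's full path and point its name/name_len at the basename. */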
3078 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
3080 ref->full_path = path;
3081 ref->name = (char *)kbasename(ref->full_path->start);
3082 ref->name_len = ref->full_path->end - ref->name;
3085 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
3087 struct recorded_ref *new;
3089 new = recorded_ref_alloc();
3093 new->dir = ref->dir;
3094 new->dir_gen = ref->dir_gen;
3095 list_add_tail(&new->list, list);
3099 static void __free_recorded_refs(struct list_head *head)
3101 struct recorded_ref *cur;
3103 while (!list_empty(head)) {
3104 cur = list_entry(head->next, struct recorded_ref, list);
3105 recorded_ref_free(cur);
3109 static void free_recorded_refs(struct send_ctx *sctx)
3111 __free_recorded_refs(&sctx->new_refs);
3112 __free_recorded_refs(&sctx->deleted_refs);
3116 * Renames/moves a file/dir to its orphan name. Used when the first
3117 * ref of an unprocessed inode gets overwritten and for all non-empty
3118 * directories.
3120 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
3121 struct fs_path *path)
3124 struct fs_path *orphan;
3126 orphan = fs_path_alloc();
3130 ret = gen_unique_name(sctx, ino, gen, orphan);
3134 ret = send_rename(sctx, path, orphan);
3137 fs_path_free(orphan);
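/*
 * Track a directory that cannot be rmdir'ed yet in an rbtree keyed by
 * (inode number, generation).
 */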
3141 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
3142 u64 dir_ino, u64 dir_gen)
3144 struct rb_node **p = &sctx->orphan_dirs.rb_node;
3145 struct rb_node *parent = NULL;
3146 struct orphan_dir_info *entry, *odi;
3150 entry = rb_entry(parent, struct orphan_dir_info, node);
3151 if (dir_ino < entry->ino)
3153 else if (dir_ino > entry->ino)
3154 p = &(*p)->rb_right;
3155 else if (dir_gen < entry->gen)
3157 else if (dir_gen > entry->gen)
3158 p = &(*p)->rb_right;
3163 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
3165 return ERR_PTR(-ENOMEM);
3168 odi->last_dir_index_offset = 0;
3170 rb_link_node(&odi->node, parent, p);
3171 rb_insert_color(&odi->node, &sctx->orphan_dirs);
3175 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
3176 u64 dir_ino, u64 gen)
3178 struct rb_node *n = sctx->orphan_dirs.rb_node;
3179 struct orphan_dir_info *entry;
3182 entry = rb_entry(n, struct orphan_dir_info, node);
3183 if (dir_ino < entry->ino)
3185 else if (dir_ino > entry->ino)
3187 else if (gen < entry->gen)
3189 else if (gen > entry->gen)
3197 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
3199 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
3204 static void free_orphan_dir_info(struct send_ctx *sctx,
3205 struct orphan_dir_info *odi)
3209 rb_erase(&odi->node, &sctx->orphan_dirs);
3214 * Returns 1 if a directory can be removed at this point in time.
3215 * We check this by iterating all dir items and checking if the inode behind
3216 * the dir item was already processed.
3218 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
3223 struct btrfs_root *root = sctx->parent_root;
3224 struct btrfs_path *path;
3225 struct btrfs_key key;
3226 struct btrfs_key found_key;
3227 struct btrfs_key loc;
3228 struct btrfs_dir_item *di;
3229 struct orphan_dir_info *odi = NULL;
3232 * Don't try to rmdir the top/root subvolume dir.
3234 if (dir == BTRFS_FIRST_FREE_OBJECTID)
3237 path = alloc_path_for_send();
3242 key.type = BTRFS_DIR_INDEX_KEY;
3245 odi = get_orphan_dir_info(sctx, dir, dir_gen);
3247 key.offset = odi->last_dir_index_offset;
3249 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
3250 struct waiting_dir_move *dm;
3252 if (found_key.objectid != key.objectid ||
3253 found_key.type != key.type)
3256 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3257 struct btrfs_dir_item);
3258 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3260 dm = get_waiting_dir_move(sctx, loc.objectid);
3262 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3268 odi->last_dir_index_offset = found_key.offset;
3269 dm->rmdir_ino = dir;
3270 dm->rmdir_gen = dir_gen;
3275 if (loc.objectid > send_progress) {
3276 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3282 odi->last_dir_index_offset = found_key.offset;
3291 free_orphan_dir_info(sctx, odi);
3296 btrfs_free_path(path);
3300 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3302 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3304 return entry != NULL;
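/*
 * Record that the given directory inode has its rename/move delayed, keyed
 * by inode number in sctx->waiting_dir_moves.
 */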
3307 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3309 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3310 struct rb_node *parent = NULL;
3311 struct waiting_dir_move *entry, *dm;
3313 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3319 dm->orphanized = orphanized;
3323 entry = rb_entry(parent, struct waiting_dir_move, node);
3324 if (ino < entry->ino) {
3326 } else if (ino > entry->ino) {
3327 p = &(*p)->rb_right;
3334 rb_link_node(&dm->node, parent, p);
3335 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3339 static struct waiting_dir_move *
3340 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3342 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3343 struct waiting_dir_move *entry;
3346 entry = rb_entry(n, struct waiting_dir_move, node);
3347 if (ino < entry->ino)
3349 else if (ino > entry->ino)
3357 static void free_waiting_dir_move(struct send_ctx *sctx,
3358 struct waiting_dir_move *dm)
3362 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3366 static int add_pending_dir_move(struct send_ctx *sctx,
3370 struct list_head *new_refs,
3371 struct list_head *deleted_refs,
3372 const bool is_orphan)
3374 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3375 struct rb_node *parent = NULL;
3376 struct pending_dir_move *entry = NULL, *pm;
3377 struct recorded_ref *cur;
3381 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3384 pm->parent_ino = parent_ino;
3387 INIT_LIST_HEAD(&pm->list);
3388 INIT_LIST_HEAD(&pm->update_refs);
3389 RB_CLEAR_NODE(&pm->node);
3393 entry = rb_entry(parent, struct pending_dir_move, node);
3394 if (parent_ino < entry->parent_ino) {
3396 } else if (parent_ino > entry->parent_ino) {
3397 p = &(*p)->rb_right;
3404 list_for_each_entry(cur, deleted_refs, list) {
3405 ret = dup_ref(cur, &pm->update_refs);
3409 list_for_each_entry(cur, new_refs, list) {
3410 ret = dup_ref(cur, &pm->update_refs);
3415 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3420 list_add_tail(&pm->list, &entry->list);
3422 rb_link_node(&pm->node, parent, p);
3423 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3428 __free_recorded_refs(&pm->update_refs);
3434 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3437 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3438 struct pending_dir_move *entry;
3441 entry = rb_entry(n, struct pending_dir_move, node);
3442 if (parent_ino < entry->parent_ino)
3444 else if (parent_ino > entry->parent_ino)
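/*
 * Walk up the path from 'ino' and return 1 if building its current path
 * would loop back onto itself because an ancestor still has a pending
 * rename/move; a blocking ancestor is returned in *ancestor_ino.
 */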
3452 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3453 u64 ino, u64 gen, u64 *ancestor_ino)
3456 u64 parent_inode = 0;
3458 u64 start_ino = ino;
3461 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3462 fs_path_reset(name);
3464 if (is_waiting_for_rm(sctx, ino, gen))
3466 if (is_waiting_for_move(sctx, ino)) {
3467 if (*ancestor_ino == 0)
3468 *ancestor_ino = ino;
3469 ret = get_first_ref(sctx->parent_root, ino,
3470 &parent_inode, &parent_gen, name);
3472 ret = __get_cur_name_and_parent(sctx, ino, gen,
3482 if (parent_inode == start_ino) {
3484 if (*ancestor_ino == 0)
3485 *ancestor_ino = ino;
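/*
 * Perform a delayed rename/move recorded in 'pm', rmdir the orphanized
 * directory that was waiting on this move if it can now be removed, and
 * update the utimes of the affected parent directories.
 */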
3494 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3496 struct fs_path *from_path = NULL;
3497 struct fs_path *to_path = NULL;
3498 struct fs_path *name = NULL;
3499 u64 orig_progress = sctx->send_progress;
3500 struct recorded_ref *cur;
3501 u64 parent_ino, parent_gen;
3502 struct waiting_dir_move *dm = NULL;
3509 name = fs_path_alloc();
3510 from_path = fs_path_alloc();
3511 if (!name || !from_path) {
3516 dm = get_waiting_dir_move(sctx, pm->ino);
3518 rmdir_ino = dm->rmdir_ino;
3519 rmdir_gen = dm->rmdir_gen;
3520 is_orphan = dm->orphanized;
3521 free_waiting_dir_move(sctx, dm);
3524 ret = gen_unique_name(sctx, pm->ino,
3525 pm->gen, from_path);
3527 ret = get_first_ref(sctx->parent_root, pm->ino,
3528 &parent_ino, &parent_gen, name);
3531 ret = get_cur_path(sctx, parent_ino, parent_gen,
3535 ret = fs_path_add_path(from_path, name);
3540 sctx->send_progress = sctx->cur_ino + 1;
3541 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3545 LIST_HEAD(deleted_refs);
3546 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3547 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3548 &pm->update_refs, &deleted_refs,
3553 dm = get_waiting_dir_move(sctx, pm->ino);
3555 dm->rmdir_ino = rmdir_ino;
3556 dm->rmdir_gen = rmdir_gen;
3560 fs_path_reset(name);
3563 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3567 ret = send_rename(sctx, from_path, to_path);
3572 struct orphan_dir_info *odi;
3575 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3577 /* already deleted */
3582 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3588 name = fs_path_alloc();
3593 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3596 ret = send_rmdir(sctx, name);
3602 ret = send_utimes(sctx, pm->ino, pm->gen);
3607 * After a rename/move, we need to update the utimes of both the new parent(s)
3608 * and old parent(s).
3610 list_for_each_entry(cur, &pm->update_refs, list) {
3612 * The parent inode might have been deleted in the send snapshot
3614 ret = get_inode_info(sctx->send_root, cur->dir, NULL);
3615 if (ret == -ENOENT) {
3622 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3629 fs_path_free(from_path);
3630 fs_path_free(to_path);
3631 sctx->send_progress = orig_progress;
3636 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3638 if (!list_empty(&m->list))
3640 if (!RB_EMPTY_NODE(&m->node))
3641 rb_erase(&m->node, &sctx->pending_dir_moves);
3642 __free_recorded_refs(&m->update_refs);
3646 static void tail_append_pending_moves(struct send_ctx *sctx,
3647 struct pending_dir_move *moves,
3648 struct list_head *stack)
3650 if (list_empty(&moves->list)) {
3651 list_add_tail(&moves->list, stack);
3654 list_splice_init(&moves->list, &list);
3655 list_add_tail(&moves->list, stack);
3656 list_splice_tail(&list, stack);
3658 if (!RB_EMPTY_NODE(&moves->node)) {
3659 rb_erase(&moves->node, &sctx->pending_dir_moves);
3660 RB_CLEAR_NODE(&moves->node);
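/*
 * Apply all pending directory moves that were waiting on sctx->cur_ino,
 * including moves that become unblocked by each move we apply.
 */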
3664 static int apply_children_dir_moves(struct send_ctx *sctx)
3666 struct pending_dir_move *pm;
3667 struct list_head stack;
3668 u64 parent_ino = sctx->cur_ino;
3671 pm = get_pending_dir_moves(sctx, parent_ino);
3675 INIT_LIST_HEAD(&stack);
3676 tail_append_pending_moves(sctx, pm, &stack);
3678 while (!list_empty(&stack)) {
3679 pm = list_first_entry(&stack, struct pending_dir_move, list);
3680 parent_ino = pm->ino;
3681 ret = apply_dir_move(sctx, pm);
3682 free_pending_move(sctx, pm);
3685 pm = get_pending_dir_moves(sctx, parent_ino);
3687 tail_append_pending_moves(sctx, pm, &stack);
3692 while (!list_empty(&stack)) {
3693 pm = list_first_entry(&stack, struct pending_dir_move, list);
3694 free_pending_move(sctx, pm);
3700 * We might need to delay a directory rename even when no ancestor directory
3701 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3702 * renamed. This happens when we rename a directory to the old name (the name
3703 * in the parent root) of some other unrelated directory that got its rename
3704 * delayed due to some ancestor with a higher inode number that got renamed.
3710 * |---- a/ (ino 257)
3711 * | |---- file (ino 260)
3713 * |---- b/ (ino 258)
3714 * |---- c/ (ino 259)
3718 * |---- a/ (ino 258)
3719 * |---- x/ (ino 259)
3720 * |---- y/ (ino 257)
3721 * |----- file (ino 260)
3723 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3724 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3725 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3728 * 1 - rename 259 from 'c' to 'x'
3729 * 2 - rename 257 from 'a' to 'x/y'
3730 * 3 - rename 258 from 'b' to 'a'
3732 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3733 * be done right away and < 0 on error.
3735 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3736 struct recorded_ref *parent_ref,
3737 const bool is_orphan)
3739 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3740 struct btrfs_path *path;
3741 struct btrfs_key key;
3742 struct btrfs_key di_key;
3743 struct btrfs_dir_item *di;
3747 struct waiting_dir_move *wdm;
3749 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3752 path = alloc_path_for_send();
3756 key.objectid = parent_ref->dir;
3757 key.type = BTRFS_DIR_ITEM_KEY;
3758 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3760 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3763 } else if (ret > 0) {
3768 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3769 parent_ref->name_len);
3775 * di_key.objectid has the number of the inode that has a dentry in the
3776 * parent directory with the same name that sctx->cur_ino is being
3777 * renamed to. We need to check if that inode is in the send root as
3778 * well and if it is currently marked as an inode with a pending rename,
3779 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3780 * that it happens after that other inode is renamed.
3782 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3783 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3788 ret = get_inode_gen(sctx->parent_root, di_key.objectid, &left_gen);
3791 ret = get_inode_gen(sctx->send_root, di_key.objectid, &right_gen);
3798 /* Different inode, no need to delay the rename of sctx->cur_ino */
3799 if (right_gen != left_gen) {
3804 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3805 if (wdm && !wdm->orphanized) {
3806 ret = add_pending_dir_move(sctx,
3808 sctx->cur_inode_gen,
3811 &sctx->deleted_refs,
3817 btrfs_free_path(path);
3822 * Check if inode ino2, or any of its ancestors, is inode ino1.
3823 * Return 1 if true, 0 if false and < 0 on error.
3825 static int check_ino_in_path(struct btrfs_root *root,
3830 struct fs_path *fs_path)
3835 return ino1_gen == ino2_gen;
3837 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3842 fs_path_reset(fs_path);
3843 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3847 return parent_gen == ino1_gen;
3854 * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
3855 * possible path (in case ino2 is not a directory and has multiple hard links).
3856 * Return 1 if true, 0 if false and < 0 on error.
3858 static int is_ancestor(struct btrfs_root *root,
3862 struct fs_path *fs_path)
3864 bool free_fs_path = false;
3867 struct btrfs_path *path = NULL;
3868 struct btrfs_key key;
3871 fs_path = fs_path_alloc();
3874 free_fs_path = true;
3877 path = alloc_path_for_send();
3883 key.objectid = ino2;
3884 key.type = BTRFS_INODE_REF_KEY;
3887 btrfs_for_each_slot(root, &key, &key, path, iter_ret) {
3888 struct extent_buffer *leaf = path->nodes[0];
3889 int slot = path->slots[0];
3893 if (key.objectid != ino2)
3895 if (key.type != BTRFS_INODE_REF_KEY &&
3896 key.type != BTRFS_INODE_EXTREF_KEY)
3899 item_size = btrfs_item_size(leaf, slot);
3900 while (cur_offset < item_size) {
3904 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3906 struct btrfs_inode_extref *extref;
3908 ptr = btrfs_item_ptr_offset(leaf, slot);
3909 extref = (struct btrfs_inode_extref *)
3911 parent = btrfs_inode_extref_parent(leaf,
3913 cur_offset += sizeof(*extref);
3914 cur_offset += btrfs_inode_extref_name_len(leaf,
3917 parent = key.offset;
3918 cur_offset = item_size;
3921 ret = get_inode_gen(root, parent, &parent_gen);
3924 ret = check_ino_in_path(root, ino1, ino1_gen,
3925 parent, parent_gen, fs_path);
3935 btrfs_free_path(path);
3937 fs_path_free(fs_path);
3941 static int wait_for_parent_move(struct send_ctx *sctx,
3942 struct recorded_ref *parent_ref,
3943 const bool is_orphan)
3946 u64 ino = parent_ref->dir;
3947 u64 ino_gen = parent_ref->dir_gen;
3948 u64 parent_ino_before, parent_ino_after;
3949 struct fs_path *path_before = NULL;
3950 struct fs_path *path_after = NULL;
3953 path_after = fs_path_alloc();
3954 path_before = fs_path_alloc();
3955 if (!path_after || !path_before) {
3961 * Our current directory inode may not yet be renamed/moved because some
3962 * ancestor (immediate or not) has to be renamed/moved first. So find if
3963 * such ancestor exists and make sure our own rename/move happens after
3964 * that ancestor is processed to avoid path build infinite loops (done
3965 * at get_cur_path()).
3967 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3968 u64 parent_ino_after_gen;
3970 if (is_waiting_for_move(sctx, ino)) {
3972 * If the current inode is an ancestor of ino in the
3973 * parent root, we need to delay the rename of the
3974 * current inode, otherwise don't delay the rename
3975 * because we can end up with a circular dependency
3976 * of renames, resulting in some directories never
3977 * getting the respective rename operations issued in
3978 * the send stream or getting into infinite path build
3981 ret = is_ancestor(sctx->parent_root,
3982 sctx->cur_ino, sctx->cur_inode_gen,
3988 fs_path_reset(path_before);
3989 fs_path_reset(path_after);
3991 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3992 &parent_ino_after_gen, path_after);
3995 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3997 if (ret < 0 && ret != -ENOENT) {
3999 } else if (ret == -ENOENT) {
4004 len1 = fs_path_len(path_before);
4005 len2 = fs_path_len(path_after);
4006 if (ino > sctx->cur_ino &&
4007 (parent_ino_before != parent_ino_after || len1 != len2 ||
4008 memcmp(path_before->start, path_after->start, len1))) {
4011 ret = get_inode_gen(sctx->parent_root, ino, &parent_ino_gen);
4014 if (ino_gen == parent_ino_gen) {
4019 ino = parent_ino_after;
4020 ino_gen = parent_ino_after_gen;
4024 fs_path_free(path_before);
4025 fs_path_free(path_after);
4028 ret = add_pending_dir_move(sctx,
4030 sctx->cur_inode_gen,
4033 &sctx->deleted_refs,
4042 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
4045 struct fs_path *new_path;
4048 * Our reference's name member points to its full_path member string, so
4049 * we use a new path here.
4051 new_path = fs_path_alloc();
4055 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
4057 fs_path_free(new_path);
4060 ret = fs_path_add(new_path, ref->name, ref->name_len);
4062 fs_path_free(new_path);
4066 fs_path_free(ref->full_path);
4067 set_ref_path(ref, new_path);
4073 * When processing the new references for an inode we may orphanize an existing
4074 * directory inode because its old name conflicts with one of the new references
4075 * of the current inode. Later, when processing another new reference of our
4076 * inode, we might need to orphanize another inode, but the path we have in the
4077 * reference reflects the pre-orphanization name of the directory we previously
4078 * orphanized. For example:
4080 * parent snapshot looks like:
4083 * |----- f1 (ino 257)
4084 * |----- f2 (ino 258)
4085 * |----- d1/ (ino 259)
4086 * |----- d2/ (ino 260)
4088 * send snapshot looks like:
4091 * |----- d1 (ino 258)
4092 * |----- f2/ (ino 259)
4093 * |----- f2_link/ (ino 260)
4094 * | |----- f1 (ino 257)
4096 * |----- d2 (ino 258)
4098 * When processing inode 257 we compute the name for inode 259 as "d1", and we
4099 * cache it in the name cache. Later when we start processing inode 258, when
4100 * collecting all its new references we set a full path of "d1/d2" for its new
4101 * reference with name "d2". When we start processing the new references we
4102 * start by processing the new reference with name "d1", and this results in
4103 * orphanizing inode 259, since its old reference causes a conflict. Then we
4104 * move on to the next new reference, with name "d2", and we find out we must
4105 * orphanize inode 260, as its old reference conflicts with ours - but for the
4106 * orphanization we use a source path corresponding to the path we stored in the
4107 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
4108 * receiver fail since the path component "d1/" no longer exists, it was renamed
4109 * to "o259-6-0/" when processing the previous new reference. So in this case we
4110 * must recompute the path in the new reference and use it for the new
4111 * orphanization operation.
4113 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
4118 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
4122 fs_path_reset(ref->full_path);
4123 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
4127 ret = fs_path_add(ref->full_path, name, ref->name_len);
4131 /* Update the reference's base name pointer. */
4132 set_ref_path(ref, ref->full_path);
4139 * This does all the move/link/unlink/rmdir magic.
4141 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
4143 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4145 struct recorded_ref *cur;
4146 struct recorded_ref *cur2;
4147 struct list_head check_dirs;
4148 struct fs_path *valid_path = NULL;
4152 int did_overwrite = 0;
4154 u64 last_dir_ino_rm = 0;
4155 bool can_rename = true;
4156 bool orphanized_dir = false;
4157 bool orphanized_ancestor = false;
4159 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
4162 * This should never happen as the root dir always has the same ref
4163 * which is always '..'
4165 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
4166 INIT_LIST_HEAD(&check_dirs);
4168 valid_path = fs_path_alloc();
4175 * First, check if the first ref of the current inode was overwritten
4176 * before. If yes, we know that the current inode was already orphanized
4177 * and thus use the orphan name. If not, we can use get_cur_path to
4178 * get the path of the first ref as it would look while receiving at
4179 * this point in time.
4180 * New inodes are always orphans at the beginning, so we force the use of the
4181 * orphan name in this case.
4182 * The first ref is stored in valid_path and will be updated if it
4183 * gets moved around.
4185 if (!sctx->cur_inode_new) {
4186 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
4187 sctx->cur_inode_gen);
4193 if (sctx->cur_inode_new || did_overwrite) {
4194 ret = gen_unique_name(sctx, sctx->cur_ino,
4195 sctx->cur_inode_gen, valid_path);
4200 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4207 * Before doing any rename and link operations, do a first pass on the
4208 * new references to orphanize any unprocessed inodes that may have a
4209 * reference that conflicts with one of the new references of the current
4210 * inode. This needs to happen first because a new reference may conflict
4211 * with the old reference of a parent directory, so we must make sure
4212 * that the paths used for link and rename commands don't use an
4213 * orphanized name when an ancestor was not yet orphanized.
4220 * |----- testdir/ (ino 259)
4221 * | |----- a (ino 257)
4223 * |----- b (ino 258)
4228 * |----- testdir_2/ (ino 259)
4229 * | |----- a (ino 260)
4231 * |----- testdir (ino 257)
4232 * |----- b (ino 257)
4233 * |----- b2 (ino 258)
4235 * Processing the new reference for inode 257 with name "b" may happen
4236 * before processing the new reference with name "testdir". If so, we
4237 * must make sure that by the time we send a link command to create the
4238 * hard link "b", inode 259 was already orphanized, since the generated
4239 * path in "valid_path" already contains the orphanized name for 259.
4240 * We are processing inode 257, so only later when processing 259 we do
4241 * the rename operation to change its temporary (orphanized) name to
4244 list_for_each_entry(cur, &sctx->new_refs, list) {
4245 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4248 if (ret == inode_state_will_create)
4252 * Check if this new ref would overwrite the first ref of another
4253 * unprocessed inode. If yes, orphanize the overwritten inode.
4254 * If we find an overwritten ref that is not the first ref,
4257 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4258 cur->name, cur->name_len,
4259 &ow_inode, &ow_gen, &ow_mode);
4263 ret = is_first_ref(sctx->parent_root,
4264 ow_inode, cur->dir, cur->name,
4269 struct name_cache_entry *nce;
4270 struct waiting_dir_move *wdm;
4272 if (orphanized_dir) {
4273 ret = refresh_ref_path(sctx, cur);
4278 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4282 if (S_ISDIR(ow_mode))
4283 orphanized_dir = true;
4286 * If ow_inode has its rename operation delayed
4287 * make sure that its orphanized name is used in
4288 * the source path when performing its rename
4291 if (is_waiting_for_move(sctx, ow_inode)) {
4292 wdm = get_waiting_dir_move(sctx,
4295 wdm->orphanized = true;
4299 * Make sure we clear our orphanized inode's
4300 * name from the name cache. This is because the
4301 * inode ow_inode might be an ancestor of some
4302 * other inode that will be orphanized as well
4303 * later and has an inode number greater than
4304 * sctx->send_progress. We need to prevent
4305 * future name lookups from using the old name
4306 * and get instead the orphan name.
4308 nce = name_cache_search(sctx, ow_inode, ow_gen);
4310 name_cache_delete(sctx, nce);
4315 * ow_inode might currently be an ancestor of
4316 * cur_ino, therefore compute valid_path (the
4317 * current path of cur_ino) again because it
4318 * might contain the pre-orphanization name of
4319 * ow_inode, which is no longer valid.
4321 ret = is_ancestor(sctx->parent_root,
4323 sctx->cur_ino, NULL);
4325 orphanized_ancestor = true;
4326 fs_path_reset(valid_path);
4327 ret = get_cur_path(sctx, sctx->cur_ino,
4328 sctx->cur_inode_gen,
4335 * If we previously orphanized a directory that
4336 * collided with a new reference that we already
4337 * processed, recompute the current path because
4338 * that directory may be part of the path.
4340 if (orphanized_dir) {
4341 ret = refresh_ref_path(sctx, cur);
4345 ret = send_unlink(sctx, cur->full_path);
4353 list_for_each_entry(cur, &sctx->new_refs, list) {
4355 * We may have refs where the parent directory does not exist
4356 * yet. This happens if the parent directory's inum is higher
4357 * than the current inum. To handle this case, we create the
4358 * parent directory out of order. But we need to check if this
4359 * already happened before due to other refs in the same dir.
4361 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4364 if (ret == inode_state_will_create) {
4367 * First check if any of the current inodes refs did
4368 * already create the dir.
4370 list_for_each_entry(cur2, &sctx->new_refs, list) {
4373 if (cur2->dir == cur->dir) {
4380 * If that did not happen, check if a previous inode
4381 * did already create the dir.
4384 ret = did_create_dir(sctx, cur->dir);
4388 ret = send_create_inode(sctx, cur->dir);
4394 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4395 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4404 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4406 ret = wait_for_parent_move(sctx, cur, is_orphan);
4416 * link/move the ref to the new place. If we have an orphan
4417 * inode, move it and update valid_path. If not, link or move
4418 * it depending on the inode mode.
4420 if (is_orphan && can_rename) {
4421 ret = send_rename(sctx, valid_path, cur->full_path);
4425 ret = fs_path_copy(valid_path, cur->full_path);
4428 } else if (can_rename) {
4429 if (S_ISDIR(sctx->cur_inode_mode)) {
4431 * Dirs can't be linked, so move it. For moved
4432 * dirs, we always have one new and one deleted
4433 * ref. The deleted ref is ignored later.
4435 ret = send_rename(sctx, valid_path,
4438 ret = fs_path_copy(valid_path,
4444 * We might have previously orphanized an inode
4445 * which is an ancestor of our current inode,
4446 * so our reference's full path, which was
4447 * computed before any such orphanizations, must
4450 if (orphanized_dir) {
4451 ret = update_ref_path(sctx, cur);
4455 ret = send_link(sctx, cur->full_path,
4461 ret = dup_ref(cur, &check_dirs);
4466 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4468 * Check if we can already rmdir the directory. If not,
4469 * orphanize it. For every dir item inside that gets deleted
4470 * later, we do this check again and rmdir it then if possible.
4471 * See the use of check_dirs for more details.
4473 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4478 ret = send_rmdir(sctx, valid_path);
4481 } else if (!is_orphan) {
4482 ret = orphanize_inode(sctx, sctx->cur_ino,
4483 sctx->cur_inode_gen, valid_path);
4489 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4490 ret = dup_ref(cur, &check_dirs);
4494 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4495 !list_empty(&sctx->deleted_refs)) {
4497 * We have a moved dir. Add the old parent to check_dirs
4499 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4501 ret = dup_ref(cur, &check_dirs);
4504 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4506 * We have a non dir inode. Go through all deleted refs and
4507 * unlink them if they were not already overwritten by other
4510 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4511 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4512 sctx->cur_ino, sctx->cur_inode_gen,
4513 cur->name, cur->name_len);
4518 * If we orphanized any ancestor before, we need
4519 * to recompute the full path for deleted names,
4520 * since any such path was computed before we
4521 * processed any references and orphanized any
4524 if (orphanized_ancestor) {
4525 ret = update_ref_path(sctx, cur);
4529 ret = send_unlink(sctx, cur->full_path);
4533 ret = dup_ref(cur, &check_dirs);
4538 * If the inode is still orphan, unlink the orphan. This may
4539 * happen when a previous inode did overwrite the first ref
4540 * of this inode and no new refs were added for the current
4541 * inode. Unlinking does not mean that the inode is deleted in
4542 * all cases. There may still be links to this inode in other
4543 * places.
4546 ret = send_unlink(sctx, valid_path);
4553 * We did collect all parent dirs where cur_inode was once located. We
4554 * now go through all these dirs and check if they are pending for
4555 * deletion and if it's finally possible to perform the rmdir now.
4556 * We also update the inode stats of the parent dirs here.
4558 list_for_each_entry(cur, &check_dirs, list) {
4560 * In case we had refs into dirs that were not processed yet,
4561 * we don't need to do the utime and rmdir logic for these dirs.
4562 * The dir will be processed later.
4564 if (cur->dir > sctx->cur_ino)
4567 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4571 if (ret == inode_state_did_create ||
4572 ret == inode_state_no_change) {
4573 /* TODO delayed utimes */
4574 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4577 } else if (ret == inode_state_did_delete &&
4578 cur->dir != last_dir_ino_rm) {
4579 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4584 ret = get_cur_path(sctx, cur->dir,
4585 cur->dir_gen, valid_path);
4588 ret = send_rmdir(sctx, valid_path);
4591 last_dir_ino_rm = cur->dir;
4599 __free_recorded_refs(&check_dirs);
4600 free_recorded_refs(sctx);
4601 fs_path_free(valid_path);
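/*
 * Compare recorded_refs by (dir, dir_gen, name_len, name); used to keep the
 * rbtrees of new and deleted refs sorted.
 */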
4605 static int rbtree_ref_comp(const void *k, const struct rb_node *node)
4607 const struct recorded_ref *data = k;
4608 const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
4611 if (data->dir > ref->dir)
4613 if (data->dir < ref->dir)
4615 if (data->dir_gen > ref->dir_gen)
4617 if (data->dir_gen < ref->dir_gen)
4619 if (data->name_len > ref->name_len)
4621 if (data->name_len < ref->name_len)
4623 result = strcmp(data->name, ref->name);
4631 static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent)
4633 const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
4635 return rbtree_ref_comp(entry, parent) < 0;
4638 static int record_ref_in_tree(struct rb_root *root, struct list_head *refs,
4639 struct fs_path *name, u64 dir, u64 dir_gen,
4640 struct send_ctx *sctx)
4643 struct fs_path *path = NULL;
4644 struct recorded_ref *ref = NULL;
4646 path = fs_path_alloc();
4652 ref = recorded_ref_alloc();
4658 ret = get_cur_path(sctx, dir, dir_gen, path);
4661 ret = fs_path_add_path(path, name);
4666 ref->dir_gen = dir_gen;
4667 set_ref_path(ref, path);
4668 list_add_tail(&ref->list, refs);
4669 rb_add(&ref->node, root, rbtree_ref_less);
4673 if (path && (!ref || !ref->full_path))
4675 recorded_ref_free(ref);
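/*
 * Record a new ref unless an identical deleted ref already exists: in that
 * case the two cancel out and the deleted ref is simply dropped.
 */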
4680 static int record_new_ref_if_needed(int num, u64 dir, int index,
4681 struct fs_path *name, void *ctx)
4684 struct send_ctx *sctx = ctx;
4685 struct rb_node *node = NULL;
4686 struct recorded_ref data;
4687 struct recorded_ref *ref;
4690 ret = get_inode_gen(sctx->send_root, dir, &dir_gen);
4695 data.dir_gen = dir_gen;
4696 set_ref_path(&data, name);
4697 node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp);
4699 ref = rb_entry(node, struct recorded_ref, node);
4700 recorded_ref_free(ref);
4702 ret = record_ref_in_tree(&sctx->rbtree_new_refs,
4703 &sctx->new_refs, name, dir, dir_gen,
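/*
 * Record a deleted ref unless an identical new ref already exists: in that
 * case the two cancel out and the new ref is simply dropped.
 */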
4710 static int record_deleted_ref_if_needed(int num, u64 dir, int index,
4711 struct fs_path *name, void *ctx)
4714 struct send_ctx *sctx = ctx;
4715 struct rb_node *node = NULL;
4716 struct recorded_ref data;
4717 struct recorded_ref *ref;
4720 ret = get_inode_gen(sctx->parent_root, dir, &dir_gen);
4725 data.dir_gen = dir_gen;
4726 set_ref_path(&data, name);
4727 node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp);
4729 ref = rb_entry(node, struct recorded_ref, node);
4730 recorded_ref_free(ref);
4732 ret = record_ref_in_tree(&sctx->rbtree_deleted_refs,
4733 &sctx->deleted_refs, name, dir,
4740 static int record_new_ref(struct send_ctx *sctx)
4744 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4745 sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
4754 static int record_deleted_ref(struct send_ctx *sctx)
4758 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4759 sctx->cmp_key, 0, record_deleted_ref_if_needed,
4769 static int record_changed_ref(struct send_ctx *sctx)
4773 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4774 sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
4777 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4778 sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx);
4788 * Record and process all refs at once. Needed when an inode changes the
4789 * generation number, which means that it was deleted and recreated.
4791 static int process_all_refs(struct send_ctx *sctx,
4792 enum btrfs_compare_tree_result cmd)
4796 struct btrfs_root *root;
4797 struct btrfs_path *path;
4798 struct btrfs_key key;
4799 struct btrfs_key found_key;
4800 iterate_inode_ref_t cb;
4801 int pending_move = 0;
4803 path = alloc_path_for_send();
4807 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4808 root = sctx->send_root;
4809 cb = record_new_ref_if_needed;
4810 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4811 root = sctx->parent_root;
4812 cb = record_deleted_ref_if_needed;
4814 btrfs_err(sctx->send_root->fs_info,
4815 "Wrong command %d in process_all_refs", cmd);
4820 key.objectid = sctx->cmp_key->objectid;
4821 key.type = BTRFS_INODE_REF_KEY;
4823 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
4824 if (found_key.objectid != key.objectid ||
4825 (found_key.type != BTRFS_INODE_REF_KEY &&
4826 found_key.type != BTRFS_INODE_EXTREF_KEY))
4829 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4833 /* Catch error found during iteration */
4838 btrfs_release_path(path);
4841 * We don't actually care about pending_move as we are simply
4842 * re-creating this inode and will be renaming it into place once we
4843 * rename the parent directory.
4845 ret = process_recorded_refs(sctx, &pending_move);
4847 btrfs_free_path(path);
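/*
 * Send a BTRFS_SEND_C_SET_XATTR command for the given path, attribute name
 * and value.
 */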
4851 static int send_set_xattr(struct send_ctx *sctx,
4852 struct fs_path *path,
4853 const char *name, int name_len,
4854 const char *data, int data_len)
4858 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4862 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4863 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4864 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4866 ret = send_cmd(sctx);
4873 static int send_remove_xattr(struct send_ctx *sctx,
4874 struct fs_path *path,
4875 const char *name, int name_len)
4879 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4883 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4884 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4886 ret = send_cmd(sctx);
4893 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4894 const char *name, int name_len, const char *data,
4895 int data_len, void *ctx)
4898 struct send_ctx *sctx = ctx;
4900 struct posix_acl_xattr_header dummy_acl;
4902 /* Capabilities are emitted by finish_inode_if_needed */
4903 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4906 p = fs_path_alloc();
4911 * This hack is needed because empty acls are stored as zero byte
4912 * data in xattrs. The problem with that is that receiving these zero byte
4913 * acls will fail later. To fix this, we send a dummy acl list that
4914 * only contains the version number and no entries.
4916 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4917 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4918 if (data_len == 0) {
4919 dummy_acl.a_version =
4920 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4921 data = (char *)&dummy_acl;
4922 data_len = sizeof(dummy_acl);
4926 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4930 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4937 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4938 const char *name, int name_len,
4939 const char *data, int data_len, void *ctx)
4942 struct send_ctx *sctx = ctx;
4945 p = fs_path_alloc();
4949 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4953 ret = send_remove_xattr(sctx, p, name, name_len);
4960 static int process_new_xattr(struct send_ctx *sctx)
4964 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4965 __process_new_xattr, sctx);
4970 static int process_deleted_xattr(struct send_ctx *sctx)
4972 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4973 __process_deleted_xattr, sctx);
4976 struct find_xattr_ctx {
4984 static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
4985 int name_len, const char *data, int data_len, void *vctx)
4987 struct find_xattr_ctx *ctx = vctx;
4989 if (name_len == ctx->name_len &&
4990 strncmp(name, ctx->name, name_len) == 0) {
4991 ctx->found_idx = num;
4992 ctx->found_data_len = data_len;
4993 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4994 if (!ctx->found_data)
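/*
 * Look up an xattr by name among the dir items at 'key'. Returns its index
 * and, if 'data' is non-NULL, a copy of its value that the caller must
 * kfree(); returns -ENOENT if the attribute does not exist.
 */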
5001 static int find_xattr(struct btrfs_root *root,
5002 struct btrfs_path *path,
5003 struct btrfs_key *key,
5004 const char *name, int name_len,
5005 char **data, int *data_len)
5008 struct find_xattr_ctx ctx;
5011 ctx.name_len = name_len;
5013 ctx.found_data = NULL;
5014 ctx.found_data_len = 0;
5016 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
5020 if (ctx.found_idx == -1)
5023 *data = ctx.found_data;
5024 *data_len = ctx.found_data_len;
5026 kfree(ctx.found_data);
5028 return ctx.found_idx;
5032 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
5033 const char *name, int name_len,
5034 const char *data, int data_len,
5038 struct send_ctx *sctx = ctx;
5039 char *found_data = NULL;
5040 int found_data_len = 0;
5042 ret = find_xattr(sctx->parent_root, sctx->right_path,
5043 sctx->cmp_key, name, name_len, &found_data,
5045 if (ret == -ENOENT) {
5046 ret = __process_new_xattr(num, di_key, name, name_len, data,
5048 } else if (ret >= 0) {
5049 if (data_len != found_data_len ||
5050 memcmp(data, found_data, data_len)) {
5051 ret = __process_new_xattr(num, di_key, name, name_len,
5052 data, data_len, ctx);
5062 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
5063 const char *name, int name_len,
5064 const char *data, int data_len,
5068 struct send_ctx *sctx = ctx;
5070 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
5071 name, name_len, NULL, NULL);
5073 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
5081 static int process_changed_xattr(struct send_ctx *sctx)
5085 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
5086 __process_changed_new_xattr, sctx);
5089 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
5090 __process_changed_deleted_xattr, sctx);
5096 static int process_all_new_xattrs(struct send_ctx *sctx)
5100 struct btrfs_root *root;
5101 struct btrfs_path *path;
5102 struct btrfs_key key;
5103 struct btrfs_key found_key;
5105 path = alloc_path_for_send();
5109 root = sctx->send_root;
5111 key.objectid = sctx->cmp_key->objectid;
5112 key.type = BTRFS_XATTR_ITEM_KEY;
5114 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
5115 if (found_key.objectid != key.objectid ||
5116 found_key.type != key.type) {
5121 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
5125 /* Catch error found during iteration */
5129 btrfs_free_path(path);
5133 static int send_verity(struct send_ctx *sctx, struct fs_path *path,
5134 struct fsverity_descriptor *desc)
5138 ret = begin_cmd(sctx, BTRFS_SEND_C_ENABLE_VERITY);
5142 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
5143 TLV_PUT_U8(sctx, BTRFS_SEND_A_VERITY_ALGORITHM,
5144 le8_to_cpu(desc->hash_algorithm));
5145 TLV_PUT_U32(sctx, BTRFS_SEND_A_VERITY_BLOCK_SIZE,
5146 1U << le8_to_cpu(desc->log_blocksize));
5147 TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SALT_DATA, desc->salt,
5148 le8_to_cpu(desc->salt_size));
5149 TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SIG_DATA, desc->signature,
5150 le32_to_cpu(desc->sig_size));
5152 ret = send_cmd(sctx);
5159 static int process_verity(struct send_ctx *sctx)
5162 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5163 struct inode *inode;
5166 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, sctx->send_root);
5168 return PTR_ERR(inode);
5170 ret = btrfs_get_verity_descriptor(inode, NULL, 0);
5174 if (ret > FS_VERITY_MAX_DESCRIPTOR_SIZE) {
5178 if (!sctx->verity_descriptor) {
5179 sctx->verity_descriptor = kvmalloc(FS_VERITY_MAX_DESCRIPTOR_SIZE,
5181 if (!sctx->verity_descriptor) {
5187 ret = btrfs_get_verity_descriptor(inode, sctx->verity_descriptor, ret);
5191 p = fs_path_alloc();
5196 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5200 ret = send_verity(sctx, p, sctx->verity_descriptor);
5211 static inline u64 max_send_read_size(const struct send_ctx *sctx)
5213 return sctx->send_max_size - SZ_16K;
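/*
 * Reserve space for the BTRFS_SEND_A_DATA attribute header in the send
 * buffer. Protocol v2 writes only the attribute type since the data length
 * is implicit; v1 writes a full TLV header with an explicit length.
 */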
5216 static int put_data_header(struct send_ctx *sctx, u32 len)
5218 if (WARN_ON_ONCE(sctx->put_data))
5220 sctx->put_data = true;
5221 if (sctx->proto >= 2) {
5223 * Since v2, the data attribute header doesn't include a length,
5224 * the data implicitly extends to the end of the command.
5226 if (sctx->send_max_size - sctx->send_size < sizeof(__le16) + len)
5228 put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size);
5229 sctx->send_size += sizeof(__le16);
5231 struct btrfs_tlv_header *hdr;
5233 if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
5235 hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
5236 put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
5237 put_unaligned_le16(len, &hdr->tlv_len);
5238 sctx->send_size += sizeof(*hdr);
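/*
 * Copy 'len' bytes of the current inode's data at 'offset' from the page
 * cache into the send buffer, triggering readahead and reading in pages as
 * needed.
 */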
5243 static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
5245 struct btrfs_root *root = sctx->send_root;
5246 struct btrfs_fs_info *fs_info = root->fs_info;
5248 pgoff_t index = offset >> PAGE_SHIFT;
5250 unsigned pg_offset = offset_in_page(offset);
5253 ret = put_data_header(sctx, len);
5257 last_index = (offset + len - 1) >> PAGE_SHIFT;
5259 while (index <= last_index) {
5260 unsigned cur_len = min_t(unsigned, len,
5261 PAGE_SIZE - pg_offset);
5263 page = find_lock_page(sctx->cur_inode->i_mapping, index);
5265 page_cache_sync_readahead(sctx->cur_inode->i_mapping,
5266 &sctx->ra, NULL, index,
5267 last_index + 1 - index);
5269 page = find_or_create_page(sctx->cur_inode->i_mapping,
5277 if (PageReadahead(page))
5278 page_cache_async_readahead(sctx->cur_inode->i_mapping,
5279 &sctx->ra, NULL, page_folio(page),
5280 index, last_index + 1 - index);
5282 if (!PageUptodate(page)) {
5283 btrfs_read_folio(NULL, page_folio(page));
5285 if (!PageUptodate(page)) {
5288 "send: IO error at offset %llu for inode %llu root %llu",
5289 page_offset(page), sctx->cur_ino,
5290 sctx->send_root->root_key.objectid);
5297 memcpy_from_page(sctx->send_buf + sctx->send_size, page,
5298 pg_offset, cur_len);
5304 sctx->send_size += cur_len;
5311 * Read some bytes from the current inode/file and send a write command to
5312 * user space.
5314 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5316 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5320 p = fs_path_alloc();
5324 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5326 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5330 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5334 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5335 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5336 ret = put_file_data(sctx, offset, len);
5340 ret = send_cmd(sctx);
5349 * Send a clone command to user space.
5351 static int send_clone(struct send_ctx *sctx,
5352 u64 offset, u32 len,
5353 struct clone_root *clone_root)
5359 btrfs_debug(sctx->send_root->fs_info,
5360 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5361 offset, len, clone_root->root->root_key.objectid,
5362 clone_root->ino, clone_root->offset);
5364 p = fs_path_alloc();
5368 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5372 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5376 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5377 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5378 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5380 if (clone_root->root == sctx->send_root) {
5381 ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen);
5384 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5386 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5392 * If the parent we're using has a received_uuid set then use that as
5393 * our clone source as that is what we will look for when doing a
5394 * receive.
5396 * This covers the case that we create a snapshot off of a received
5397 * subvolume and then use that as the parent and try to receive on a
5398 * different host.
5400 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5401 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5402 clone_root->root->root_item.received_uuid);
5404 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5405 clone_root->root->root_item.uuid);
5406 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5407 btrfs_root_ctransid(&clone_root->root->root_item));
5408 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5409 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5410 clone_root->offset);
5412 ret = send_cmd(sctx);
5421 * Send an update extent command to user space.
5423 static int send_update_extent(struct send_ctx *sctx,
5424 u64 offset, u32 len)
5429 p = fs_path_alloc();
5433 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5437 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5441 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5442 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5443 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5445 ret = send_cmd(sctx);
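/*
 * Send a hole ending at offset 'end', either as a single update-extent
 * command (with BTRFS_SEND_FLAG_NO_FILE_DATA) or as writes of zeroes.
 */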
5453 static int send_hole(struct send_ctx *sctx, u64 end)
5455 struct fs_path *p = NULL;
5456 u64 read_size = max_send_read_size(sctx);
5457 u64 offset = sctx->cur_inode_last_extent;
5461 * A hole that starts at EOF or beyond it. Since we do not yet support
5462 * fallocate (for extent preallocation and hole punching), sending a
5463 * write of zeroes starting at EOF or beyond would later require issuing
5464 * a truncate operation which would undo the write and achieve nothing.
5466 if (offset >= sctx->cur_inode_size)
5470 * Don't go beyond the inode's i_size due to prealloc extents that start
5473 end = min_t(u64, end, sctx->cur_inode_size);
5475 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5476 return send_update_extent(sctx, offset, end - offset);
5478 p = fs_path_alloc();
5481 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5483 goto tlv_put_failure;
5484 while (offset < end) {
5485 u64 len = min(end - offset, read_size);
5487 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5490 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5491 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5492 ret = put_data_header(sctx, len);
5495 memset(sctx->send_buf + sctx->send_size, 0, len);
5496 sctx->send_size += len;
5497 ret = send_cmd(sctx);
5502 sctx->cur_inode_next_write_offset = offset;
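/*
 * Send an inline extent as an encoded write, copying the (possibly
 * compressed) inline data directly out of the leaf.
 */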
static int send_encoded_inline_extent(struct send_ctx *sctx,
				      struct btrfs_path *path, u64 offset,
				      u64 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct fs_path *fspath;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u64 ram_bytes;
	size_t inline_size;
	int ret;

	inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	fspath = fs_path_alloc();
	if (!fspath) {
		ret = -ENOMEM;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
	if (ret < 0)
		goto out;

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
		    min(key.offset + ram_bytes - offset, len));
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, ram_bytes);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset);
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				btrfs_file_extent_compression(leaf, ei));
	if (ret < 0)
		goto out;
	TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);

	ret = put_data_header(sctx, inline_size);
	if (ret < 0)
		goto out;
	read_extent_buffer(leaf, sctx->send_buf + sctx->send_size,
			   btrfs_file_extent_inline_start(ei), inline_size);
	sctx->send_size += inline_size;

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(fspath);
	iput(inode);
	return ret;
}
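/*
 * Send a regular compressed extent as an encoded write. The extent is read
 * from disk directly into the send buffer at a page aligned offset, so the
 * command header and attributes are written first and the file data follows
 * at the next page boundary.
 */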
static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
			       u64 offset, u64 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct fs_path *fspath;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u64 disk_bytenr, disk_num_bytes;
	u32 data_offset;
	struct btrfs_cmd_header *hdr;
	u32 crc;
	int ret;

	inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	fspath = fs_path_alloc();
	if (!fspath) {
		ret = -ENOMEM;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
	if (ret < 0)
		goto out;

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
	disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, ei);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
		    min(key.offset + btrfs_file_extent_num_bytes(leaf, ei) - offset,
			len));
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN,
		    btrfs_file_extent_ram_bytes(leaf, ei));
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET,
		    offset - key.offset + btrfs_file_extent_offset(leaf, ei));
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				btrfs_file_extent_compression(leaf, ei));
	if (ret < 0)
		goto out;
	TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
	TLV_PUT_U32(sctx, BTRFS_SEND_A_ENCRYPTION, 0);

	ret = put_data_header(sctx, disk_num_bytes);
	if (ret < 0)
		goto out;

	/*
	 * We want to do I/O directly into the send buffer, so get the next page
	 * boundary in the send buffer. This means that there may be a gap
	 * between the beginning of the command and the file data.
	 */
	data_offset = ALIGN(sctx->send_size, PAGE_SIZE);
	if (data_offset > sctx->send_max_size ||
	    sctx->send_max_size - data_offset < disk_num_bytes) {
		ret = -EOVERFLOW;
		goto out;
	}

	/*
	 * Note that send_buf is a mapping of send_buf_pages, so this is really
	 * reading into send_buf.
	 */
	ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
						    disk_bytenr, disk_num_bytes,
						    sctx->send_buf_pages +
						    (data_offset >> PAGE_SHIFT));
	if (ret)
		goto out;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
	hdr->crc = 0;
	crc = btrfs_crc32c(0, sctx->send_buf, sctx->send_size);
	crc = btrfs_crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);
	if (!ret) {
		ret = write_buf(sctx->send_filp, sctx->send_buf + data_offset,
				disk_num_bytes, &sctx->send_off);
	}
	sctx->send_size = 0;
	sctx->put_data = false;

tlv_put_failure:
out:
	fs_path_free(fspath);
	iput(inode);
	return ret;
}
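/*
 * Send the data of a file extent. Depending on the send flags and on the
 * extent's properties this either emits a single UPDATE_EXTENT command, an
 * encoded write that preserves the on-disk compression, or a series of plain
 * WRITE commands read through the page cache.
 */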
static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
			    const u64 offset, const u64 len)
{
	const u64 end = offset + len;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_file_extent_item *ei;
	u64 read_size = max_send_read_size(sctx);
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) &&
	    btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
		bool is_inline = (btrfs_file_extent_type(leaf, ei) ==
				  BTRFS_FILE_EXTENT_INLINE);

		/*
		 * Send the compressed extent unless the compressed data is
		 * larger than the decompressed data. This can happen if we're
		 * not sending the entire extent, either because it has been
		 * partially overwritten/truncated or because this is a part of
		 * the extent that we couldn't clone in clone_range().
		 */
		if (is_inline &&
		    btrfs_file_extent_inline_item_len(leaf,
						      path->slots[0]) <= len) {
			return send_encoded_inline_extent(sctx, path, offset,
							  len);
		} else if (!is_inline &&
			   btrfs_file_extent_disk_num_bytes(leaf, ei) <= len) {
			return send_encoded_extent(sctx, path, offset, len);
		}
	}

	if (sctx->cur_inode == NULL) {
		struct btrfs_root *root = sctx->send_root;

		sctx->cur_inode = btrfs_iget(root->fs_info->sb, sctx->cur_ino, root);
		if (IS_ERR(sctx->cur_inode)) {
			int err = PTR_ERR(sctx->cur_inode);

			sctx->cur_inode = NULL;
			return err;
		}
		memset(&sctx->ra, 0, sizeof(struct file_ra_state));
		file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping);

		/*
		 * It's very likely there are no pages from this inode in the page
		 * cache, so after reading extents and sending their data, we clean
		 * the page cache to avoid trashing the page cache (adding pressure
		 * to the page cache and forcing eviction of other data more useful
		 * for applications).
		 *
		 * We decide if we should clean the page cache simply by checking
		 * if the inode's mapping nrpages is 0 when we first open it, and
		 * not by using something like filemap_range_has_page() before
		 * reading an extent because when we ask the readahead code to
		 * read a given file range, it may (and almost always does) read
		 * pages from beyond that range (see the documentation for
		 * page_cache_sync_readahead()), so it would not be reliable,
		 * because after reading the first extent future calls to
		 * filemap_range_has_page() would return true because the readahead
		 * on the previous extent resulted in reading pages of the current
		 * extent as well.
		 */
		sctx->clean_page_cache = (sctx->cur_inode->i_mapping->nrpages == 0);
		sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE);
	}

	while (sent < len) {
		u64 size = min(len - sent, read_size);
		int ret;

		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		sent += size;
	}

	if (sctx->clean_page_cache && IS_ALIGNED(end, PAGE_SIZE)) {
		/*
		 * Always operate only on ranges that are a multiple of the page
		 * size. This is not only to prevent zeroing parts of a page in
		 * the case of subpage sector size, but also to guarantee we evict
		 * pages, as passing a range that is smaller than page size does
		 * not evict the respective page (only zeroes part of its content).
		 *
		 * Always start from the end offset of the last range cleared.
		 * This is because the readahead code may (and very often does)
		 * read pages beyond the range we request for readahead. So if
		 * we have an extent layout like this:
		 *
		 *            [ extent A ] [ extent B ] [ extent C ]
		 *
		 * When we ask page_cache_sync_readahead() to read extent A, it
		 * may also trigger reads for pages of extent B. If we are doing
		 * an incremental send and extent B has not changed between the
		 * parent and send snapshots, some or all of its pages may end
		 * up being read and placed in the page cache. So when truncating
		 * the page cache we always start from the end offset of the
		 * previously processed extent up to the end of the current
		 * extent.
		 */
		truncate_inode_pages_range(&sctx->cur_inode->i_data,
					   sctx->page_cache_clear_start,
					   end - 1);
		sctx->page_cache_clear_start = end;
	}

	return 0;
}
/*
 * Search for a capability xattr related to sctx->cur_ino. If the capability is
 * found, call the send_set_xattr() function to emit it.
 *
 * Return 0 if there isn't a capability, or when the capability was emitted
 * successfully, or < 0 if an error occurred.
 */
static int send_capabilities(struct send_ctx *sctx)
{
	struct fs_path *fspath = NULL;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct extent_buffer *leaf;
	unsigned long data_ptr;
	char *buf = NULL;
	int buf_len;
	int ret = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
				XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
	if (!di) {
		/* There is no xattr for this inode */
		goto out;
	} else if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	buf_len = btrfs_dir_data_len(leaf, di);

	fspath = fs_path_alloc();
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!fspath || !buf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
	if (ret < 0)
		goto out;

	data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
	read_extent_buffer(leaf, buf, data_ptr, buf_len);

	ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
			strlen(XATTR_NAME_CAPS), buf, buf_len);
out:
	kfree(buf);
	fs_path_free(fspath);
	btrfs_free_path(path);
	return ret;
}
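/*
 * Clone as much as possible of the range [offset, offset + len) of the file
 * currently being processed from the given clone source, falling back to
 * regular writes (send_extent_data()) for the parts that can't be cloned
 * (holes, extents beyond the source's i_size, or mismatching extents).
 */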
static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
		       struct clone_root *clone_root, const u64 disk_byte,
		       u64 data_offset, u64 offset, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	struct btrfs_inode_info info;
	u64 clone_src_i_size = 0;

	/*
	 * Prevent cloning from a zero offset with a length matching the sector
	 * size because in some scenarios this will make the receiver fail.
	 *
	 * For example, if in the source filesystem the extent at offset 0
	 * has a length of sectorsize and it was written using direct IO, then
	 * it can never be an inline extent (even if compression is enabled).
	 * Then this extent can be cloned in the original filesystem to a non
	 * zero file offset, but it may not be possible to clone in the
	 * destination filesystem because it can be inlined due to compression
	 * on the destination filesystem (as the receiver's write operations are
	 * always done using buffered IO). The same happens when the original
	 * filesystem does not have compression enabled but the destination
	 * filesystem has.
	 */
	if (clone_root->offset == 0 &&
	    len == sctx->send_root->fs_info->sectorsize)
		return send_extent_data(sctx, dst_path, offset, len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = get_inode_info(clone_root->root, clone_root->ino, &info);
	btrfs_release_path(path);
	if (ret < 0)
		goto out;
	clone_src_i_size = info.size;

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo) we blindly send a clone operation
	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
	 * a file bar that matches the content of file foo - iow, doesn't match
	 * the content from bar in the original filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;
		u8 type;
		u64 ext_len;
		u64 clone_len;
		u64 clone_data_offset;
		bool crossed_src_i_size = false;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
			ext_len = PAGE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		}

		if (key.offset + ext_len <= clone_root->offset)
			goto next;

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			if (hole_len > len)
				hole_len = len;
			ret = send_extent_data(sctx, dst_path, offset,
					       hole_len);
			if (ret < 0)
				goto out;

			len -= hole_len;
			if (len == 0)
				break;
			offset += hole_len;
			clone_root->offset += hole_len;
			data_offset += hole_len;
		}

		if (key.offset >= clone_root->offset + len)
			break;

		if (key.offset >= clone_src_i_size)
			break;

		if (key.offset + ext_len > clone_src_i_size) {
			ext_len = clone_src_i_size - key.offset;
			crossed_src_i_size = true;
		}

		clone_data_offset = btrfs_file_extent_offset(leaf, ei);
		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
			clone_root->offset = key.offset;
			if (clone_data_offset < data_offset &&
				clone_data_offset + ext_len > data_offset) {
				u64 extent_offset;

				extent_offset = data_offset - clone_data_offset;
				ext_len -= extent_offset;
				clone_data_offset += extent_offset;
				clone_root->offset += extent_offset;
			}
		}

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    clone_data_offset == data_offset) {
			const u64 src_end = clone_root->offset + clone_len;
			const u64 sectorsize = SZ_64K;

			/*
			 * We can't clone the last block, when its size is not
			 * sector size aligned, into the middle of a file. If we
			 * do so, the receiver will get a failure (-EINVAL) when
			 * trying to clone or will silently corrupt the data in
			 * the destination file if it's on a kernel without the
			 * fix introduced by commit ac765f83f1397646
			 * ("Btrfs: fix data corruption due to cloning of eof
			 * block).
			 *
			 * So issue a clone of the aligned down range plus a
			 * regular write for the eof block, if we hit that case.
			 *
			 * Also, we use the maximum possible sector size, 64K,
			 * because we don't know what's the sector size of the
			 * filesystem that receives the stream, so we have to
			 * assume the largest possible sector size.
			 */
			if (src_end == clone_src_i_size &&
			    !IS_ALIGNED(src_end, sectorsize) &&
			    offset + clone_len < sctx->cur_inode_size) {
				u64 slen;

				slen = ALIGN_DOWN(src_end - clone_root->offset,
						  sectorsize);
				if (slen > 0) {
					ret = send_clone(sctx, offset, slen,
							 clone_root);
					if (ret < 0)
						goto out;
				}
				ret = send_extent_data(sctx, dst_path,
						       offset + slen,
						       clone_len - slen);
			} else {
				ret = send_clone(sctx, offset, clone_len,
						 clone_root);
			}
		} else if (crossed_src_i_size && clone_len < len) {
			/*
			 * If we are at i_size of the clone source inode and we
			 * can not clone from it, terminate the loop. This is
			 * to avoid sending two write operations, one with a
			 * length matching clone_len and the final one after
			 * this loop with a length of len - clone_len.
			 *
			 * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
			 * was passed to the send ioctl), this helps avoid
			 * sending an encoded write for an offset that is not
			 * sector size aligned, in case the i_size of the source
			 * inode is not sector size aligned. That will make the
			 * receiver fallback to decompression of the data and
			 * writing it using regular buffered IO, therefore while
			 * not incorrect, it's not optimal due to decompression
			 * and possible re-compression at the receiver.
			 */
			break;
		} else {
			ret = send_extent_data(sctx, dst_path, offset,
					       clone_len);
		}
		if (ret < 0)
			goto out;

		len -= clone_len;
		if (len == 0)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;

		/*
		 * If we are cloning from the file we are currently processing,
		 * and using the send root as the clone root, we must stop once
		 * the current clone offset reaches the current eof of the file
		 * at the receiver, otherwise we would issue an invalid clone
		 * operation (source range going beyond eof) and cause the
		 * receiver to fail. So if we reach the current eof, bail out
		 * and fallback to a regular write.
		 */
		if (clone_root->root == sctx->send_root &&
		    clone_root->ino == sctx->cur_ino &&
		    clone_root->offset >= sctx->cur_inode_next_write_offset)
			break;

		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, dst_path, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
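/*
 * Send the current extent either through clone operations (when we have a
 * clone source and the extent's end offset is block size aligned) or through
 * regular write commands, and advance cur_inode_next_write_offset past it.
 */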
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	u64 offset = key->offset;
	u64 end;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
	if (offset >= end)
		return 0;

	if (clone_root && IS_ALIGNED(end, bs)) {
		struct btrfs_file_extent_item *ei;
		u64 disk_byte;
		u64 data_offset;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, path, clone_root, disk_byte,
				  data_offset, offset, end - offset);
	} else {
		ret = send_extent_data(sctx, path, offset, end - offset);
	}
	sctx->cur_inode_next_write_offset = end;
	return ret;
}
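/*
 * Check if the extent item at left_path/ekey in the send snapshot refers to
 * the same data as the corresponding file range in the parent snapshot.
 * Returns 1 if the range is unchanged (no data needs to be sent), 0 if it
 * changed, and < 0 on error.
 */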
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			right_len = btrfs_file_extent_ram_bytes(eb, ei);
			right_len = PAGE_ALIGN(right_len);
		} else {
			right_len = btrfs_file_extent_num_bytes(eb, ei);
		}

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		/*
		 * We just wanted to see if when we have an inline extent, what
		 * follows it is a regular extent (wanted to check the above
		 * condition for inline extents too). This should normally not
		 * happen but it's possible for example when we have an inline
		 * compressed extent representing data with a size matching
		 * the page size (currently the same as sector size).
		 */
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
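/*
 * Look up the file extent item of the current inode that covers the given
 * offset and record its end offset in sctx->cur_inode_last_extent, which is
 * used for detecting holes between consecutively processed extents.
 */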
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_key key;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
out:
	btrfs_free_path(path);
	return ret;
}
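/*
 * Check if the range [start, end) of the current inode is a hole in the
 * parent snapshot. Returns 1 if it is (only implicit holes or extents with a
 * zero disk_bytenr were found), 0 if some data exists in the range, and < 0
 * on error.
 */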
static int range_is_hole_in_parent(struct send_ctx *sctx,
				   const u64 start,
				   const u64 end)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = sctx->parent_root;
	u64 search_start = start;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = search_start;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	while (search_start < end) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *fi;
		u64 extent_end;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < sctx->cur_ino ||
		    key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (key.objectid > sctx->cur_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		extent_end = btrfs_file_extent_end(path);
		if (extent_end <= start)
			goto next;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
			search_start = extent_end;
			goto next;
		}
		ret = 0;
		goto out;
next:
		path->slots[0]++;
	}
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
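/*
 * If there is a gap between the end of the last processed extent and the
 * offset of the current one, and that gap is not already a hole in the
 * parent snapshot, punch it at the receiver by sending zeroed writes.
 */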
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset) {
		ret = range_is_hole_in_parent(sctx,
					      sctx->cur_inode_last_extent,
					      key->offset);
		if (ret < 0)
			return ret;
		else if (ret == 0)
			ret = send_hole(sctx, key->offset);
		else
			ret = 0;
	}
	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
	return ret;
}
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
			sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}
static int process_all_extents(struct send_ctx *sctx)
{
	int ret = 0;
	int iter_ret = 0;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			break;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			break;
	}
	/* Catch error found during iteration */
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
	return ret;
}
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	struct btrfs_inode_info info;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 left_fileattr;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	u64 right_fileattr;
	int need_chmod = 0;
	int need_chown = 0;
	bool need_fileattr = false;
	int need_truncate = 1;
	int pending_move = 0;
	int refs_processed = 0;

	if (sctx->ignore_cur_inode)
		return 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;
	ret = get_inode_info(sctx->send_root, sctx->cur_ino, &info);
	if (ret < 0)
		goto out;
	left_mode = info.mode;
	left_uid = info.uid;
	left_gid = info.gid;
	left_fileattr = info.fileattr;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
			need_truncate = 0;
	} else {
		u64 old_size;

		ret = get_inode_info(sctx->parent_root, sctx->cur_ino, &info);
		if (ret < 0)
			goto out;
		old_size = info.size;
		right_mode = info.mode;
		right_uid = info.uid;
		right_gid = info.gid;
		right_fileattr = info.fileattr;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_fileattr != right_fileattr)
			need_fileattr = true;
		if ((old_size == sctx->cur_inode_size) ||
		    (sctx->cur_inode_size > old_size &&
		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
			need_truncate = 0;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		if (need_truncate) {
			ret = send_truncate(sctx, sctx->cur_ino,
					    sctx->cur_inode_gen,
					    sctx->cur_inode_size);
			if (ret < 0)
				goto out;
		}
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_mode);
		if (ret < 0)
			goto out;
	}
	if (need_fileattr) {
		ret = send_fileattr(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				    left_fileattr);
		if (ret < 0)
			goto out;
	}

	if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY)
	    && sctx->cur_inode_needs_verity) {
		ret = process_verity(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_capabilities(sctx);
	if (ret < 0)
		goto out;

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
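/*
 * Release the inode we kept open while processing its extents, and drop any
 * page cache pages that were only brought in by our readahead.
 */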
static void close_current_inode(struct send_ctx *sctx)
{
	u64 i_size;

	if (sctx->cur_inode == NULL)
		return;

	i_size = i_size_read(sctx->cur_inode);

	/*
	 * If we are doing an incremental send, we may have extents between the
	 * last processed extent and the i_size that have not been processed
	 * because they haven't changed but we may have read some of their pages
	 * through readahead, see the comments at send_extent_data().
	 */
	if (sctx->clean_page_cache && sctx->page_cache_clear_start < i_size)
		truncate_inode_pages_range(&sctx->cur_inode->i_data,
					   sctx->page_cache_clear_start,
					   round_up(i_size, PAGE_SIZE) - 1);

	iput(sctx->cur_inode);
	sctx->cur_inode = NULL;
}
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	close_current_inode(sctx);

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = false;
	sctx->cur_inode_last_extent = (u64)-1;
	sctx->cur_inode_next_write_offset = 0;
	sctx->ignore_cur_inode = false;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = true;
	}

	/*
	 * Normally we do not find inodes with a link count of zero (orphans)
	 * because the most common case is to create a snapshot and use it
	 * for a send operation. However other less common use cases involve
	 * using a subvolume and send it after turning it to RO mode just
	 * after deleting all hard links of a file while holding an open
	 * file descriptor against it or turning a RO snapshot into RW mode,
	 * keep an open file descriptor against a file, delete it and then
	 * turn the snapshot back to RO mode before using it for a send
	 * operation. The former is what the receiver operation does.
	 * Therefore, if we want to send these snapshots soon after they're
	 * received, we need to handle orphan inodes as well. Moreover, orphans
	 * can appear not only in the send snapshot but also in the parent
	 * snapshot. Here are several cases:
	 *
	 * Case 1: BTRFS_COMPARE_TREE_NEW
	 *       |  send snapshot  | action
	 *       --------------------------------
	 * nlink |        0        | ignore
	 *
	 * Case 2: BTRFS_COMPARE_TREE_DELETED
	 *       | parent snapshot | action
	 *       ----------------------------------
	 * nlink |        0        | as usual
	 * Note: No unlinks will be sent because there are no paths for it.
	 *
	 * Case 3: BTRFS_COMPARE_TREE_CHANGED
	 *           |       | parent snapshot | send snapshot | action
	 *           -----------------------------------------------------------------------
	 * subcase 1 | nlink |        0        |       0       | ignore
	 * subcase 2 | nlink |       >0        |       0       | new_gen(deletion)
	 * subcase 3 | nlink |        0        |      >0       | new_gen(creation)
	 *
	 */
	if (result == BTRFS_COMPARE_TREE_NEW) {
		if (btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii) == 0) {
			sctx->ignore_cur_inode = true;
			goto out;
		}
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = true;
		sctx->cur_inode_deleted = false;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = false;
		sctx->cur_inode_deleted = true;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		u32 new_nlinks, old_nlinks;

		new_nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
		old_nlinks = btrfs_inode_nlink(sctx->right_path->nodes[0], right_ii);
		if (new_nlinks == 0 && old_nlinks == 0) {
			sctx->ignore_cur_inode = true;
			goto out;
		} else if (new_nlinks == 0 || old_nlinks == 0) {
			sctx->cur_inode_new_gen = 1;
		}
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			if (old_nlinks > 0) {
				sctx->cur_inode_gen = right_gen;
				sctx->cur_inode_new = false;
				sctx->cur_inode_deleted = true;
				sctx->cur_inode_size = btrfs_inode_size(
						sctx->right_path->nodes[0],
						right_ii);
				sctx->cur_inode_mode = btrfs_inode_mode(
						sctx->right_path->nodes[0],
						right_ii);
				ret = process_all_refs(sctx,
						BTRFS_COMPARE_TREE_DELETED);
				if (ret < 0)
					goto out;
			}

			/*
			 * Now process the inode as if it was new.
			 */
			if (new_nlinks > 0) {
				sctx->cur_inode_gen = left_gen;
				sctx->cur_inode_new = true;
				sctx->cur_inode_deleted = false;
				sctx->cur_inode_size = btrfs_inode_size(
						sctx->left_path->nodes[0],
						left_ii);
				sctx->cur_inode_mode = btrfs_inode_mode(
						sctx->left_path->nodes[0],
						left_ii);
				sctx->cur_inode_rdev = btrfs_inode_rdev(
						sctx->left_path->nodes[0],
						left_ii);
				ret = send_create_inode_if_needed(sctx);
				if (ret < 0)
					goto out;

				ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
				if (ret < 0)
					goto out;
				/*
				 * Advance send_progress now as we did not get
				 * into process_recorded_refs_if_needed in the
				 * new_gen case.
				 */
				sctx->send_progress = sctx->cur_ino + 1;

				/*
				 * Now process all extents and xattrs of the
				 * inode as if they were all new.
				 */
				ret = process_all_extents(sctx);
				if (ret < 0)
					goto out;
				ret = process_all_new_xattrs(sctx);
				if (ret < 0)
					goto out;
			}
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = false;
			sctx->cur_inode_new_gen = false;
			sctx->cur_inode_deleted = false;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode did already initiate processing of refs. The reason for this is
 * that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	/*
	 * We have found an extent item that changed without the inode item
	 * having changed. This can happen either after relocation (where the
	 * disk_bytenr of an extent item is replaced at
	 * relocation.c:replace_file_extents()) or after deduplication into a
	 * file in both the parent and send snapshots (where an extent item can
	 * get modified or replaced with a new one). Note that deduplication
	 * updates the inode item, but it only changes the iversion (sequence
	 * field in the inode item) of the inode, so if a file is deduplicated
	 * the same amount of times in both the parent and send snapshots, its
	 * iversion becomes the same in both snapshots, whence the inode item is
	 * the same on both snapshots.
	 */
	if (sctx->cur_ino != sctx->cmp_key->objectid)
		return 0;

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
static int changed_verity(struct send_ctx *sctx, enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			sctx->cur_inode_needs_verity = true;
	}
	return ret;
}
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_gen(sctx->send_root, dir, &new_gen);
	if (ret)
		return ret;

	ret = get_inode_gen(sctx->parent_root, dir, &orig_gen);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      struct send_ctx *sctx)
{
	int ret;

	/*
	 * We can not hold the commit root semaphore here. This is because in
	 * the case of sending and receiving to the same filesystem, using a
	 * pipe, could result in a deadlock:
	 *
	 * 1) The task running send blocks on the pipe because it's full;
	 *
	 * 2) The task running receive, which is the only consumer of the pipe,
	 *    is waiting for a transaction commit (for example due to a space
	 *    reservation when doing a write or triggering a transaction commit
	 *    when creating a subvolume);
	 *
	 * 3) The transaction is waiting to write lock the commit root semaphore,
	 *    but can not acquire it since it's being held at 1).
	 *
	 * Down this call chain we write to the pipe through kernel_write().
	 * The same type of problem can also happen when sending to a file that
	 * is stored in the same filesystem - when reserving space for a write
	 * into the file, we can trigger a transaction commit.
	 *
	 * Our caller has supplied us with clones of leaves from the send and
	 * parent roots, so we're safe here from a concurrent relocation and
	 * further reallocation of metadata extents while we are here. Below we
	 * also assert that the leaves are clones.
	 */
	lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem);

	/*
	 * We always have a send root, so left_path is never NULL. We will not
	 * have a leaf when we have reached the end of the send root but have
	 * not yet reached the end of the parent root.
	 */
	if (left_path->nodes[0])
		ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
				&left_path->nodes[0]->bflags));
	/*
	 * When doing a full send we don't have a parent root, so right_path is
	 * NULL. When doing an incremental send, we may have reached the end of
	 * the parent root already, so we don't have a leaf at right_path.
	 */
	if (right_path && right_path->nodes[0])
		ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
				&right_path->nodes[0]->bflags));

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY) {
		ret = changed_inode(sctx, result);
	} else if (!sctx->ignore_cur_inode) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY)
			ret = changed_ref(sctx, result);
		else if (key->type == BTRFS_XATTR_ITEM_KEY)
			ret = changed_xattr(sctx, result);
		else if (key->type == BTRFS_EXTENT_DATA_KEY)
			ret = changed_extent(sctx, result);
		else if (key->type == BTRFS_VERITY_DESC_ITEM_KEY &&
			 key->offset == 0)
			ret = changed_verity(sctx, result);
	}

out:
	return ret;
}
static int search_key_again(const struct send_ctx *sctx,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    const struct btrfs_key *key)
{
	int ret;

	if (!path->need_commit_sem)
		lockdep_assert_held_read(&root->fs_info->commit_root_sem);

	/*
	 * Roots used for send operations are readonly and no one can add,
	 * update or remove keys from them, so we should be able to find our
	 * key again. The only exception is deduplication, which can operate on
	 * readonly roots and add, update or remove keys to/from them - but at
	 * the moment we don't allow it to run in parallel with send.
	 */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	ASSERT(ret <= 0);
	if (ret > 0) {
		btrfs_print_tree(path->nodes[path->lowest_level], false);
		btrfs_err(root->fs_info,
"send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
			  key->objectid, key->type, key->offset,
			  (root == sctx->parent_root ? "parent" : "send"),
			  root->root_key.objectid, path->lowest_level,
			  path->slots[path->lowest_level]);
		ret = -EUCLEAN;
	}

	return ret;
}
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD_ALWAYS;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	down_read(&fs_info->commit_root_sem);
	sctx->last_reloc_trans = fs_info->last_reloc_trans;
	up_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		ret = changed_cb(path, NULL, &key,
				 BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		down_read(&fs_info->commit_root_sem);
		if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
			sctx->last_reloc_trans = fs_info->last_reloc_trans;
			up_read(&fs_info->commit_root_sem);
			/*
			 * A transaction used for relocating a block group was
			 * committed or is about to finish its commit. Release
			 * our path (leaf) and restart the search, so that we
			 * avoid operating on any file extent items that are
			 * stale, with a disk_bytenr that reflects a pre
			 * relocation value. This way we avoid as much as
			 * possible to fallback to regular writes when checking
			 * if we can clone file ranges.
			 */
			btrfs_release_path(path);
			ret = search_key_again(sctx, send_root, path, &key);
			if (ret < 0)
				goto out;
		} else {
			up_read(&fs_info->commit_root_sem);
		}

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}
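/*
 * Replace the extent buffer at the given path level with a private clone, so
 * that it can be used after the commit root semaphore is released without
 * being affected by concurrent COW or reallocation of the original buffer.
 */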
static int replace_node_with_clone(struct btrfs_path *path, int level)
{
	struct extent_buffer *clone;

	clone = btrfs_clone_extent_buffer(path->nodes[level]);
	if (!clone)
		return -ENOMEM;

	free_extent_buffer(path->nodes[level]);
	path->nodes[level] = clone;

	return 0;
}
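/*
 * Descend one level in the tree, triggering readahead for upcoming sibling
 * nodes/leaves and cloning the child when we land on a leaf (level 0).
 */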
static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent = path->nodes[*level];
	int slot = path->slots[*level];
	const int nritems = btrfs_header_nritems(parent);
	u64 reada_max;
	u64 reada_done = 0;

	lockdep_assert_held_read(&parent->fs_info->commit_root_sem);

	BUG_ON(*level == 0);
	eb = btrfs_read_node_slot(parent, slot);
	if (IS_ERR(eb))
		return PTR_ERR(eb);

	/*
	 * Trigger readahead for the next leaves we will process, so that it is
	 * very likely that when we need them they are already in memory and we
	 * will not block on disk IO. For nodes we only do readahead for one,
	 * since the time window between processing nodes is typically larger.
	 */
	reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);

	for (slot++; slot < nritems && reada_done < reada_max; slot++) {
		if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
			btrfs_readahead_node_child(parent, slot);
			reada_done += eb->fs_info->nodesize;
		}
	}

	path->nodes[*level - 1] = eb;
	path->slots[*level - 1] = 0;
	(*level)--;

	if (*level == 0)
		return replace_node_with_clone(path, 0);

	return 0;
}
static int tree_move_next_or_upnext(struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level) {
			path->slots[*level] = nritems - 1;
			return -1;
		}

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}
/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key,
			u64 reada_min_gen)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(path, level, root_level);
	} else {
		ret = tree_move_down(path, level, reada_min_gen);
	}

	/*
	 * Even if we have reached the end of a tree, ret is -1, update the key
	 * anyway, so that in case we need to restart due to a block group
	 * relocation, we can assert that the last key of the root node still
	 * exists in the tree.
	 */
	if (*level == 0)
		btrfs_item_key_to_cpu(path->nodes[*level], key,
				      path->slots[*level]);
	else
		btrfs_node_key_to_cpu(path->nodes[*level], key,
				      path->slots[*level]);

	return ret;
}
static int tree_compare_item(struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
				right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}
/*
 * A transaction used for relocating a block group was committed or is about to
 * finish its commit. Release our paths and restart the search, so that we are
 * not using stale extent buffers:
 *
 * 1) For levels > 0, we are only holding references of extent buffers, without
 *    any locks on them, which does not prevent them from having been relocated
 *    and reallocated after the last time we released the commit root semaphore.
 *    The exception are the root nodes, for which we always have a clone, see
 *    the comment at btrfs_compare_trees();
 *
 * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so
 *    we are safe from the concurrent relocation and reallocation. However they
 *    can have file extent items with a pre relocation disk_bytenr value, so we
 *    restart the search from the current commit roots and clone the new leaves
 *    so that we get the post relocation disk_bytenr values. Not doing so, could
 *    make us clone the wrong data in case there are new extents using the old
 *    disk_bytenr that happen to be shared.
 */
static int restart_after_relocation(struct btrfs_path *left_path,
				    struct btrfs_path *right_path,
				    const struct btrfs_key *left_key,
				    const struct btrfs_key *right_key,
				    int left_level,
				    int right_level,
				    const struct send_ctx *sctx)
{
	int root_level;
	int ret;

	lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem);

	btrfs_release_path(left_path);
	btrfs_release_path(right_path);

	/*
	 * Since keys can not be added or removed to/from our roots because they
	 * are readonly and we do not allow deduplication to run in parallel
	 * (which can add, remove or change keys), the layout of the trees should
	 * not change.
	 */
	left_path->lowest_level = left_level;
	ret = search_key_again(sctx, sctx->send_root, left_path, left_key);
	if (ret < 0)
		return ret;

	right_path->lowest_level = right_level;
	ret = search_key_again(sctx, sctx->parent_root, right_path, right_key);
	if (ret < 0)
		return ret;

	/*
	 * If the lowest level nodes are leaves, clone them so that they can be
	 * safely used by changed_cb() while not under the protection of the
	 * commit root semaphore, even if relocation and reallocation happens in
	 * parallel.
	 */
	if (left_level == 0) {
		ret = replace_node_with_clone(left_path, 0);
		if (ret < 0)
			return ret;
	}

	if (right_level == 0) {
		ret = replace_node_with_clone(right_path, 0);
		if (ret < 0)
			return ret;
	}

	/*
	 * Now clone the root nodes (unless they happen to be the leaves we have
	 * already cloned). This is to protect against concurrent snapshotting of
	 * the send and parent roots (see the comment at btrfs_compare_trees()).
	 */
	root_level = btrfs_header_level(sctx->send_root->commit_root);
	if (root_level > 0) {
		ret = replace_node_with_clone(left_path, root_level);
		if (ret < 0)
			return ret;
	}

	root_level = btrfs_header_level(sctx->parent_root->commit_root);
	if (root_level > 0) {
		ret = replace_node_with_clone(right_path, root_level);
		if (ret < 0)
			return ret;
	}

	return 0;
}
7606 * This function compares two trees and calls the provided callback for
7607 * every changed/new/deleted item it finds.
7608 * If shared tree blocks are encountered, whole subtrees are skipped, making
7609 * the compare pretty fast on snapshotted subvolumes.
7611 * This currently works on commit roots only. As commit roots are read only,
7612 * we don't do any locking. The commit roots are protected with transactions.
7613 * Transactions are ended and rejoined when a commit is tried in between.
7615 * This function checks for modifications done to the trees while comparing.
7616 * If it detects a change, it aborts immediately.
7618 static int btrfs_compare_trees(struct btrfs_root *left_root,
7619 struct btrfs_root *right_root, struct send_ctx *sctx)
7621 struct btrfs_fs_info *fs_info = left_root->fs_info;
7624 struct btrfs_path *left_path = NULL;
7625 struct btrfs_path *right_path = NULL;
7626 struct btrfs_key left_key;
7627 struct btrfs_key right_key;
7628 char *tmp_buf = NULL;
7629 int left_root_level;
7630 int right_root_level;
7633 int left_end_reached = 0;
7634 int right_end_reached = 0;
7635 int advance_left = 0;
7636 int advance_right = 0;
7643 left_path = btrfs_alloc_path();
7648 right_path = btrfs_alloc_path();
7654 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
7660 left_path->search_commit_root = 1;
7661 left_path->skip_locking = 1;
7662 right_path->search_commit_root = 1;
7663 right_path->skip_locking = 1;
7666 * Strategy: Go to the first items of both trees. Then do
7668 * If both trees are at level 0
7669 * Compare keys of current items
7670 * If left < right treat left item as new, advance left tree
7672 * If left > right treat right item as deleted, advance right tree
7674 * If left == right do deep compare of items, treat as changed if
7675 * needed, advance both trees and repeat
7676 * If both trees are at the same level but not at level 0
7677 * Compare keys of current nodes/leafs
7678 * If left < right advance left tree and repeat
7679 * If left > right advance right tree and repeat
7680 * If left == right compare blockptrs of the next nodes/leafs
7681 * If they match advance both trees but stay at the same level
7683 * If they don't match advance both trees while allowing to go
7685 * If tree levels are different
7686 * Advance the tree that needs it and repeat
7688 * Advancing a tree means:
7689 * If we are at level 0, try to go to the next slot. If that's not
7690 * possible, go one level up and repeat. Stop when we found a level
7691 * where we could go to the next slot. We may at this point be on a
7694 * If we are not at level 0 and not on shared tree blocks, go one level deeper.
7697 * If we are not at level 0 and on shared tree blocks, go one slot to
7698 * the right if possible or go up and right. (See the simplified sketch after this function.)
7701 down_read(&fs_info->commit_root_sem);
7702 left_level = btrfs_header_level(left_root->commit_root);
7703 left_root_level = left_level;
7705 * We clone the root node of the send and parent roots to prevent races
7706 * with snapshot creation of these roots. Snapshot creation COWs the
7707 * root node of a tree, so after the transaction is committed the old
7708 * extent can be reallocated while this send operation is still ongoing.
7709 * So we clone them, under the commit root semaphore, to be race free.
7711 left_path->nodes[left_level] =
7712 btrfs_clone_extent_buffer(left_root->commit_root);
7713 if (!left_path->nodes[left_level]) {
7718 right_level = btrfs_header_level(right_root->commit_root);
7719 right_root_level = right_level;
7720 right_path->nodes[right_level] =
7721 btrfs_clone_extent_buffer(right_root->commit_root);
7722 if (!right_path->nodes[right_level]) {
7727 * Our right root is the parent root, while the left root is the "send"
7728 * root. We know that all new nodes/leaves in the left root must have
7729 * a generation greater than the right root's generation, so we trigger
7730 * readahead for those nodes and leaves of the left root, as we know we
7731 * will need to read them at some point.
7733 reada_min_gen = btrfs_header_generation(right_root->commit_root);
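/*
 * Note: reada_min_gen is passed to the tree_advance() calls below, which use
 * it to restrict readahead to sufficiently new blocks. Such blocks cannot be
 * shared with the parent root and are therefore guaranteed to be read by
 * this send, so the readahead is never wasted.
 */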
7735 if (left_level == 0)
7736 btrfs_item_key_to_cpu(left_path->nodes[left_level],
7737 &left_key, left_path->slots[left_level]);
7739 btrfs_node_key_to_cpu(left_path->nodes[left_level],
7740 &left_key, left_path->slots[left_level]);
7741 if (right_level == 0)
7742 btrfs_item_key_to_cpu(right_path->nodes[right_level],
7743 &right_key, right_path->slots[right_level]);
7745 btrfs_node_key_to_cpu(right_path->nodes[right_level],
7746 &right_key, right_path->slots[right_level]);
7748 sctx->last_reloc_trans = fs_info->last_reloc_trans;
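/*
 * From here on, last_reloc_trans is re-checked each time the commit root
 * semaphore is reacquired: if relocation committed a transaction in the
 * meantime, the cloned nodes may refer to reallocated extents, so the walk
 * is restarted via restart_after_relocation() below.
 */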
7751 if (need_resched() ||
7752 rwsem_is_contended(&fs_info->commit_root_sem)) {
7753 up_read(&fs_info->commit_root_sem);
7754 cond_resched();
7755 down_read(&fs_info->commit_root_sem);
7758 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
7759 ret = restart_after_relocation(left_path, right_path,
7760 &left_key, &right_key,
7761 left_level, right_level,
7765 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7768 if (advance_left && !left_end_reached) {
7769 ret = tree_advance(left_path, &left_level,
7771 advance_left != ADVANCE_ONLY_NEXT,
7772 &left_key, reada_min_gen);
7774 left_end_reached = ADVANCE;
7779 if (advance_right && !right_end_reached) {
7780 ret = tree_advance(right_path, &right_level,
7782 advance_right != ADVANCE_ONLY_NEXT,
7783 &right_key, reada_min_gen);
7785 right_end_reached = ADVANCE;
7791 if (left_end_reached && right_end_reached) {
7794 } else if (left_end_reached) {
7795 if (right_level == 0) {
7796 up_read(&fs_info->commit_root_sem);
7797 ret = changed_cb(left_path, right_path,
7799 BTRFS_COMPARE_TREE_DELETED,
7803 down_read(&fs_info->commit_root_sem);
7805 advance_right = ADVANCE;
7807 } else if (right_end_reached) {
7808 if (left_level == 0) {
7809 up_read(&fs_info->commit_root_sem);
7810 ret = changed_cb(left_path, right_path,
7812 BTRFS_COMPARE_TREE_NEW,
7816 down_read(&fs_info->commit_root_sem);
7818 advance_left = ADVANCE;
7822 if (left_level == 0 && right_level == 0) {
7823 up_read(&fs_info->commit_root_sem);
7824 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7826 ret = changed_cb(left_path, right_path,
7828 BTRFS_COMPARE_TREE_NEW,
7830 advance_left = ADVANCE;
7831 } else if (cmp > 0) {
7832 ret = changed_cb(left_path, right_path,
7834 BTRFS_COMPARE_TREE_DELETED,
7836 advance_right = ADVANCE;
7838 enum btrfs_compare_tree_result result;
7840 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7841 ret = tree_compare_item(left_path, right_path,
7844 result = BTRFS_COMPARE_TREE_CHANGED;
7846 result = BTRFS_COMPARE_TREE_SAME;
7847 ret = changed_cb(left_path, right_path,
7848 &left_key, result, sctx);
7849 advance_left = ADVANCE;
7850 advance_right = ADVANCE;
7855 down_read(&fs_info->commit_root_sem);
7856 } else if (left_level == right_level) {
7857 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7859 advance_left = ADVANCE;
7860 } else if (cmp > 0) {
7861 advance_right = ADVANCE;
7863 left_blockptr = btrfs_node_blockptr(
7864 left_path->nodes[left_level],
7865 left_path->slots[left_level]);
7866 right_blockptr = btrfs_node_blockptr(
7867 right_path->nodes[right_level],
7868 right_path->slots[right_level]);
7869 left_gen = btrfs_node_ptr_generation(
7870 left_path->nodes[left_level],
7871 left_path->slots[left_level]);
7872 right_gen = btrfs_node_ptr_generation(
7873 right_path->nodes[right_level],
7874 right_path->slots[right_level]);
7875 if (left_blockptr == right_blockptr &&
7876 left_gen == right_gen) {
7878 * As we're on a shared block, don't
7879 * allow to go deeper.
7881 advance_left = ADVANCE_ONLY_NEXT;
7882 advance_right = ADVANCE_ONLY_NEXT;
7884 advance_left = ADVANCE;
7885 advance_right = ADVANCE;
7888 } else if (left_level < right_level) {
7889 advance_right = ADVANCE;
7891 advance_left = ADVANCE;
7896 up_read(&fs_info->commit_root_sem);
7898 btrfs_free_path(left_path);
7899 btrfs_free_path(right_path);
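/*
 * Illustrative sketch, not btrfs code: a single-level version of the merge
 * walk described in the "Strategy" comment above, assuming both trees have
 * been flattened into sorted key arrays. sketch_changed_cb() is a
 * hypothetical stand-in for changed_cb(); the result values only mirror the
 * BTRFS_COMPARE_TREE_* semantics conceptually.
 */
enum sketch_result { SKETCH_NEW, SKETCH_DELETED, SKETCH_CHANGED_OR_SAME };

static void sketch_compare_sorted(const u64 *left, int nr_left,
				  const u64 *right, int nr_right,
				  void (*sketch_changed_cb)(u64 key,
							    enum sketch_result res))
{
	int l = 0;
	int r = 0;

	while (l < nr_left || r < nr_right) {
		if (l == nr_left) {
			/* Left tree exhausted: remaining right keys were deleted. */
			sketch_changed_cb(right[r++], SKETCH_DELETED);
		} else if (r == nr_right) {
			/* Right tree exhausted: remaining left keys are new. */
			sketch_changed_cb(left[l++], SKETCH_NEW);
		} else if (left[l] < right[r]) {
			/* Key only in the send root: treat as new. */
			sketch_changed_cb(left[l++], SKETCH_NEW);
		} else if (left[l] > right[r]) {
			/* Key only in the parent root: treat as deleted. */
			sketch_changed_cb(right[r++], SKETCH_DELETED);
		} else {
			/* Same key on both sides: deep compare of the items. */
			sketch_changed_cb(left[l], SKETCH_CHANGED_OR_SAME);
			l++;
			r++;
		}
	}
}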
7904 static int send_subvol(struct send_ctx *sctx)
7908 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7909 ret = send_header(sctx);
7914 ret = send_subvol_begin(sctx);
7918 if (sctx->parent_root) {
7919 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
7922 ret = finish_inode_if_needed(sctx, 1);
7926 ret = full_send_tree(sctx);
7932 free_recorded_refs(sctx);
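/*
 * Given the above, a complete stream (as assembled by send_subvol() together
 * with btrfs_ioctl_send() below) has this shape:
 *
 *	[stream header]		(unless BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)
 *	[subvolume/snapshot begin command]
 *	[commands emitted by changed_cb() while walking the trees]
 *	[end command]		(unless BTRFS_SEND_FLAG_OMIT_END_CMD)
 */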
7937 * If orphan cleanup did remove any orphans from a root, it means the tree
7938 * was modified and therefore the commit root is not the same as the current
7939 * root anymore. This is a problem, because send uses the commit root and
7940 * therefore can see inode items that don't exist in the current root anymore,
7941 * and for example make calls to btrfs_iget, which will do tree lookups based
7942 * on the current root and not on the commit root. Those lookups will fail,
7943 * returning a -ESTALE error, and making send fail with that error. So make
7944 * sure a send does not see any orphans we have just removed, and that it will
7945 * see the same inodes regardless of whether a transaction commit happened
7946 * before it started (meaning that the commit root will be the same as the
7947 * current root) or not.
7949 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7952 struct btrfs_trans_handle *trans = NULL;
7955 if (sctx->parent_root &&
7956 sctx->parent_root->node != sctx->parent_root->commit_root)
7959 for (i = 0; i < sctx->clone_roots_cnt; i++)
7960 if (sctx->clone_roots[i].root->node !=
7961 sctx->clone_roots[i].root->commit_root)
7965 return btrfs_end_transaction(trans);
7970 /* Use any root, all fs roots will get their commit roots updated. */
7972 trans = btrfs_join_transaction(sctx->send_root);
7974 return PTR_ERR(trans);
7978 return btrfs_commit_transaction(trans);
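/*
 * Illustrative sketch, not btrfs code: the staleness test above reduces to
 * the loop below. struct sketch_root and sketch_any_root_dirty() are
 * hypothetical stand-ins for struct btrfs_root and the checks done on the
 * parent and clone roots.
 */
struct sketch_root {
	void *node;		/* current root node */
	void *commit_root;	/* root node as of the last committed transaction */
};

static bool sketch_any_root_dirty(struct sketch_root **roots, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* A mismatch means the root has uncommitted changes. */
		if (roots[i]->node != roots[i]->commit_root)
			return true;
	}

	/* All commit roots match their current roots: nothing to commit. */
	return false;
}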
7982 * Make sure any existing delalloc is flushed for any root used by a send
7983 * operation so that we do not miss any data and we do not race with writeback
7984 * finishing and changing a tree while send is using the tree. This could
7985 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7986 * a send operation then uses the subvolume.
7987 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
7989 static int flush_delalloc_roots(struct send_ctx *sctx)
7991 struct btrfs_root *root = sctx->parent_root;
7996 ret = btrfs_start_delalloc_snapshot(root, false);
7999 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
8002 for (i = 0; i < sctx->clone_roots_cnt; i++) {
8003 root = sctx->clone_roots[i].root;
8004 ret = btrfs_start_delalloc_snapshot(root, false);
8007 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
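/*
 * The required ordering, as wired up in btrfs_ioctl_send() below (error
 * handling omitted here):
 *
 *	ret = flush_delalloc_roots(sctx);
 *	ret = ensure_commit_roots_uptodate(sctx);
 *	ret = send_subvol(sctx);
 *
 * Swapping the first two steps would reintroduce the stale commit root
 * problem described above ensure_commit_roots_uptodate().
 */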
8013 static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
8015 spin_lock(&root->root_item_lock);
8016 root->send_in_progress--;
8018 * Not much left to do, we don't know why it's unbalanced and
8019 * can't blindly reset it to 0.
8021 if (root->send_in_progress < 0)
8022 btrfs_err(root->fs_info,
8023 "send_in_progress unbalanced %d root %llu",
8024 root->send_in_progress, root->root_key.objectid);
8025 spin_unlock(&root->root_item_lock);
8028 static void dedupe_in_progress_warn(const struct btrfs_root *root)
8030 btrfs_warn_rl(root->fs_info,
8031 "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
8032 root->root_key.objectid, root->dedupe_in_progress);
8035 long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
8038 struct btrfs_root *send_root = BTRFS_I(inode)->root;
8039 struct btrfs_fs_info *fs_info = send_root->fs_info;
8040 struct btrfs_root *clone_root;
8041 struct send_ctx *sctx = NULL;
8043 u64 *clone_sources_tmp = NULL;
8044 int clone_sources_to_rollback = 0;
8046 int sort_clone_roots = 0;
8048 if (!capable(CAP_SYS_ADMIN))
8052 * The subvolume must remain read-only during send, so protect against
8053 * making it RW. This also protects against deletion.
8055 spin_lock(&send_root->root_item_lock);
8056 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
8057 dedupe_in_progress_warn(send_root);
8058 spin_unlock(&send_root->root_item_lock);
8061 send_root->send_in_progress++;
8062 spin_unlock(&send_root->root_item_lock);
8065 * Userspace tools do the checks and warn the user if it's not RO.
8068 if (!btrfs_root_readonly(send_root)) {
8074 * Check that we don't overflow at later allocations: we request
8075 * clone_sources_count + 1 items, and compare to unsigned long inside
 * access_ok(), i.e. (clone_sources_count + 1) * sizeof(struct clone_root)
 * must fit in an unsigned long.
8078 if (arg->clone_sources_count >
8079 ULONG_MAX / sizeof(struct clone_root) - 1) {
8084 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
8089 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
8095 INIT_LIST_HEAD(&sctx->new_refs);
8096 INIT_LIST_HEAD(&sctx->deleted_refs);
8097 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
8098 INIT_LIST_HEAD(&sctx->name_cache_list);
8100 INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
8101 mt_init(&sctx->backref_cache.entries);
8103 sctx->flags = arg->flags;
8105 if (arg->flags & BTRFS_SEND_FLAG_VERSION) {
8106 if (arg->version > BTRFS_SEND_STREAM_VERSION) {
8110 /* Zero means "use the highest version" */
8111 sctx->proto = arg->version ?: BTRFS_SEND_STREAM_VERSION;
8115 if ((arg->flags & BTRFS_SEND_FLAG_COMPRESSED) && sctx->proto < 2) {
8120 sctx->send_filp = fget(arg->send_fd);
8121 if (!sctx->send_filp) {
8126 sctx->send_root = send_root;
8128 * Unlikely but possible, if the subvolume is marked for deletion but
8129 * is slow to remove the directory entry, send can still be started.
8131 if (btrfs_root_dead(sctx->send_root)) {
8136 sctx->clone_roots_cnt = arg->clone_sources_count;
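/*
 * For protocol v2 the send buffer spans many pages and is vmalloc'ed, and
 * vmalloc memory is only virtually contiguous, so the struct page pointers
 * backing it are collected up front with vmalloc_to_page() to allow later
 * page-based I/O on the buffer.
 */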
8138 if (sctx->proto >= 2) {
8139 u32 send_buf_num_pages;
8141 sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V2;
8142 sctx->send_buf = vmalloc(sctx->send_max_size);
8143 if (!sctx->send_buf) {
8147 send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT;
8148 sctx->send_buf_pages = kcalloc(send_buf_num_pages,
8149 sizeof(*sctx->send_buf_pages),
8151 if (!sctx->send_buf_pages) {
8155 for (i = 0; i < send_buf_num_pages; i++) {
8156 sctx->send_buf_pages[i] =
8157 vmalloc_to_page(sctx->send_buf + (i << PAGE_SHIFT));
8160 sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1;
8161 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
8163 if (!sctx->send_buf) {
8168 sctx->pending_dir_moves = RB_ROOT;
8169 sctx->waiting_dir_moves = RB_ROOT;
8170 sctx->orphan_dirs = RB_ROOT;
8171 sctx->rbtree_new_refs = RB_ROOT;
8172 sctx->rbtree_deleted_refs = RB_ROOT;
8174 sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
8175 sizeof(*sctx->clone_roots),
8177 if (!sctx->clone_roots) {
8182 alloc_size = array_size(sizeof(*arg->clone_sources),
8183 arg->clone_sources_count);
8185 if (arg->clone_sources_count) {
8186 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
8187 if (!clone_sources_tmp) {
8192 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
8199 for (i = 0; i < arg->clone_sources_count; i++) {
8200 clone_root = btrfs_get_fs_root(fs_info,
8201 clone_sources_tmp[i], true);
8202 if (IS_ERR(clone_root)) {
8203 ret = PTR_ERR(clone_root);
8206 spin_lock(&clone_root->root_item_lock);
8207 if (!btrfs_root_readonly(clone_root) ||
8208 btrfs_root_dead(clone_root)) {
8209 spin_unlock(&clone_root->root_item_lock);
8210 btrfs_put_root(clone_root);
8214 if (clone_root->dedupe_in_progress) {
8215 dedupe_in_progress_warn(clone_root);
8216 spin_unlock(&clone_root->root_item_lock);
8217 btrfs_put_root(clone_root);
8221 clone_root->send_in_progress++;
8222 spin_unlock(&clone_root->root_item_lock);
8224 sctx->clone_roots[i].root = clone_root;
8225 clone_sources_to_rollback = i + 1;
8227 kvfree(clone_sources_tmp);
8228 clone_sources_tmp = NULL;
8231 if (arg->parent_root) {
8232 sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
8234 if (IS_ERR(sctx->parent_root)) {
8235 ret = PTR_ERR(sctx->parent_root);
8239 spin_lock(&sctx->parent_root->root_item_lock);
8240 sctx->parent_root->send_in_progress++;
8241 if (!btrfs_root_readonly(sctx->parent_root) ||
8242 btrfs_root_dead(sctx->parent_root)) {
8243 spin_unlock(&sctx->parent_root->root_item_lock);
8247 if (sctx->parent_root->dedupe_in_progress) {
8248 dedupe_in_progress_warn(sctx->parent_root);
8249 spin_unlock(&sctx->parent_root->root_item_lock);
8253 spin_unlock(&sctx->parent_root->root_item_lock);
8257 * Clones from send_root are allowed, but only if the clone source
8258 * is behind the current send position. This is checked while searching
8259 * for possible clone sources.
8261 sctx->clone_roots[sctx->clone_roots_cnt++].root =
8262 btrfs_grab_root(sctx->send_root);
8264 /* We do a bsearch later */
8265 sort(sctx->clone_roots, sctx->clone_roots_cnt,
8266 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
8268 sort_clone_roots = 1;
8270 ret = flush_delalloc_roots(sctx);
8274 ret = ensure_commit_roots_uptodate(sctx);
8278 ret = send_subvol(sctx);
8282 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
8283 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
8286 ret = send_cmd(sctx);
8292 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
8293 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
8295 struct pending_dir_move *pm;
8297 n = rb_first(&sctx->pending_dir_moves);
8298 pm = rb_entry(n, struct pending_dir_move, node);
8299 while (!list_empty(&pm->list)) {
8300 struct pending_dir_move *pm2;
8302 pm2 = list_first_entry(&pm->list,
8303 struct pending_dir_move, list);
8304 free_pending_move(sctx, pm2);
8306 free_pending_move(sctx, pm);
8309 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
8310 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
8312 struct waiting_dir_move *dm;
8314 n = rb_first(&sctx->waiting_dir_moves);
8315 dm = rb_entry(n, struct waiting_dir_move, node);
8316 rb_erase(&dm->node, &sctx->waiting_dir_moves);
8320 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
8321 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
8323 struct orphan_dir_info *odi;
8325 n = rb_first(&sctx->orphan_dirs);
8326 odi = rb_entry(n, struct orphan_dir_info, node);
8327 free_orphan_dir_info(sctx, odi);
8330 if (sort_clone_roots) {
8331 for (i = 0; i < sctx->clone_roots_cnt; i++) {
8332 btrfs_root_dec_send_in_progress(
8333 sctx->clone_roots[i].root);
8334 btrfs_put_root(sctx->clone_roots[i].root);
8337 for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
8338 btrfs_root_dec_send_in_progress(
8339 sctx->clone_roots[i].root);
8340 btrfs_put_root(sctx->clone_roots[i].root);
8343 btrfs_root_dec_send_in_progress(send_root);
8345 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
8346 btrfs_root_dec_send_in_progress(sctx->parent_root);
8347 btrfs_put_root(sctx->parent_root);
8350 kvfree(clone_sources_tmp);
8353 if (sctx->send_filp)
8354 fput(sctx->send_filp);
8356 kvfree(sctx->clone_roots);
8357 kfree(sctx->send_buf_pages);
8358 kvfree(sctx->send_buf);
8359 kvfree(sctx->verity_descriptor);
8361 name_cache_free(sctx);
8363 close_current_inode(sctx);
8365 empty_backref_cache(sctx);