2 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/bsearch.h>
21 #include <linux/file.h>
22 #include <linux/sort.h>
23 #include <linux/mount.h>
24 #include <linux/xattr.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/radix-tree.h>
27 #include <linux/vmalloc.h>
28 #include <linux/string.h>
35 #include "btrfs_inode.h"
36 #include "transaction.h"
37 #include "compression.h"
40 * A fs_path is a helper to dynamically build path names with unknown size.
41 * It reallocates the internal buffer on demand.
42 * It allows fast adding of path elements on the right side (normal path) and
43 * fast adding to the left side (reversed path). A reversed path can also be
44 * unreversed if needed.
53 unsigned short buf_len:15;
54 unsigned short reversed:1;
58 * The average path length does not exceed 200 bytes, so we get
59 * better packing in the slab and a higher chance to satisfy
60 * an allocation later during send.
65 #define FS_PATH_INLINE_SIZE \
66 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
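/*
 * Illustrative sketch (not from the original sources): typical use of the
 * fs_path helpers defined below, with error handling mostly elided.
 *
 *	struct fs_path *p = fs_path_alloc();
 *	if (!p)
 *		return -ENOMEM;
 *	ret = fs_path_add(p, "dir", 3);
 *	ret = fs_path_add(p, "file", 4);
 *
 * After the two adds, p->start points at the NUL terminated string
 * "dir/file" (components are joined with '/'), p->end at its terminating
 * byte, and the buffer is either inline_buf or a heap buffer grown on
 * demand by fs_path_ensure_buf(). The path is released with fs_path_free(p).
 */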
69 /* reused for each extent */
71 struct btrfs_root *root;
78 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
79 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
82 struct file *send_filp;
88 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
89 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
91 struct btrfs_root *send_root;
92 struct btrfs_root *parent_root;
93 struct clone_root *clone_roots;
96 /* current state of the compare_tree call */
97 struct btrfs_path *left_path;
98 struct btrfs_path *right_path;
99 struct btrfs_key *cmp_key;
102 * Info about the currently processed inode. In case of a deleted inode,
103 * these are the values from the deleted inode.
108 int cur_inode_new_gen;
109 int cur_inode_deleted;
113 u64 cur_inode_last_extent;
117 struct list_head new_refs;
118 struct list_head deleted_refs;
120 struct radix_tree_root name_cache;
121 struct list_head name_cache_list;
124 struct file_ra_state ra;
129 * We process inodes in increasing inode-number order, so if before an
130 * incremental send we reverse the parent/child relationship of
131 * directories such that a directory with a lower inode number was
132 * the parent of a directory with a higher inode number, and the one
133 * becoming the new parent got renamed too, we can't rename/move the
134 * directory with lower inode number when we finish processing it - we
135 * must process the directory with higher inode number first, then
136 * rename/move it and then rename/move the directory with lower inode
137 * number. Example follows.
139 * Tree state when the first send was performed:
151 * Tree state when the second (incremental) send is performed:
160 * The sequence of steps that lead to the second state was:
162 * mv /a/b/c/d /a/b/c2/d2
163 * mv /a/b/c /a/b/c2/d2/cc
165 * "c" has a lower inode number, but we can't move it (2nd mv operation)
166 * before we move "d", which has a higher inode number.
168 * So we just memorize which move/rename operations must be performed
169 * later when their respective parent is processed and moved/renamed.
172 /* Indexed by parent directory inode number. */
173 struct rb_root pending_dir_moves;
176 * Reverse index, indexed by the inode number of a directory that
177 * is waiting for the move/rename of its immediate parent before its
178 * own move/rename can be performed.
180 struct rb_root waiting_dir_moves;
183 * A directory that is going to be rm'ed might have a child directory
184 * which is in the pending directory moves index above. In this case,
185 * the directory can only be removed after the move/rename of its child
186 * is performed. Example:
206 * Sequence of steps that lead to the send snapshot:
207 * rm -f /a/b/c/foo.txt
209 * mv /a/b/c/x /a/b/YY
212 * When the child is processed, its move/rename is delayed until its
213 * parent is processed (as explained above), but all other operations
214 * like update utimes, chown, chgrp, etc, are performed and the paths
215 * that it uses for those operations must use the orphanized name of
216 * its parent (the directory we're going to rm later), so we need to
217 * memorize that name.
219 * Indexed by the inode number of the directory to be deleted.
221 struct rb_root orphan_dirs;
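/*
 * To summarize the three trees: pending_dir_moves is indexed by the inode
 * number of the parent directory that must be processed first,
 * waiting_dir_moves by the inode number of the directory whose own
 * move/rename is deferred, and orphan_dirs by the inode number of a
 * directory whose rmdir has to wait for a child's move/rename.
 */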
224 struct pending_dir_move {
226 struct list_head list;
230 struct list_head update_refs;
233 struct waiting_dir_move {
237 * There might be some directory that could not be removed because it
238 * was waiting for this directory inode to be moved first. Therefore,
239 * after this directory is moved, we can try to rmdir the inode rmdir_ino.
245 struct orphan_dir_info {
251 struct name_cache_entry {
252 struct list_head list;
254 * The radix tree has only 32 bit entries but we need to handle 64 bit inums.
255 * We use the lower 32 bits of the 64 bit inum as the key in the tree. If
256 * more than one inum falls into the same entry, we use radix_list
257 * to store the additional entries. radix_list is also used to store
258 * entries where two entries have the same inum but different
261 struct list_head radix_list;
267 int need_later_update;
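/*
 * Example (illustrative): on a 32 bit kernel, inums 0x100000005 and
 * 0x200000005 both map to radix tree key 0x00000005. The tree slot holds a
 * separately allocated list head, every entry with that lower-32-bit value
 * is chained on it via radix_list, and name_cache_search() walks the list
 * comparing ino and gen to find the right entry.
 */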
272 static void inconsistent_snapshot_error(struct send_ctx *sctx,
273 enum btrfs_compare_tree_result result,
276 const char *result_string;
279 case BTRFS_COMPARE_TREE_NEW:
280 result_string = "new";
282 case BTRFS_COMPARE_TREE_DELETED:
283 result_string = "deleted";
285 case BTRFS_COMPARE_TREE_CHANGED:
286 result_string = "updated";
288 case BTRFS_COMPARE_TREE_SAME:
290 result_string = "unchanged";
294 result_string = "unexpected";
297 btrfs_err(sctx->send_root->fs_info,
298 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
299 result_string, what, sctx->cmp_key->objectid,
300 sctx->send_root->root_key.objectid,
302 sctx->parent_root->root_key.objectid : 0));
305 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
307 static struct waiting_dir_move *
308 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
310 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
312 static int need_send_hole(struct send_ctx *sctx)
314 return (sctx->parent_root && !sctx->cur_inode_new &&
315 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
316 S_ISREG(sctx->cur_inode_mode));
319 static void fs_path_reset(struct fs_path *p)
322 p->start = p->buf + p->buf_len - 1;
332 static struct fs_path *fs_path_alloc(void)
336 p = kmalloc(sizeof(*p), GFP_KERNEL);
340 p->buf = p->inline_buf;
341 p->buf_len = FS_PATH_INLINE_SIZE;
346 static struct fs_path *fs_path_alloc_reversed(void)
358 static void fs_path_free(struct fs_path *p)
362 if (p->buf != p->inline_buf)
367 static int fs_path_len(struct fs_path *p)
369 return p->end - p->start;
372 static int fs_path_ensure_buf(struct fs_path *p, int len)
380 if (p->buf_len >= len)
383 if (len > PATH_MAX) {
388 path_len = p->end - p->start;
389 old_buf_len = p->buf_len;
392 * First time the inline_buf does not suffice
394 if (p->buf == p->inline_buf) {
395 tmp_buf = kmalloc(len, GFP_KERNEL);
397 memcpy(tmp_buf, p->buf, old_buf_len);
399 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
405 * The real size of the buffer is bigger; this lets the fast path
406 * happen most of the time.
408 p->buf_len = ksize(p->buf);
411 tmp_buf = p->buf + old_buf_len - path_len - 1;
412 p->end = p->buf + p->buf_len - 1;
413 p->start = p->end - path_len;
414 memmove(p->start, tmp_buf, path_len + 1);
417 p->end = p->start + path_len;
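/*
 * Worked example (illustrative) for the reversed case above: with
 * old_buf_len == 16 and the 3 byte path "abc" stored at the end of the old
 * buffer (bytes 12..15, including the terminating NUL), growing to a 32 byte
 * buffer moves those 4 bytes to bytes 28..31, so start and end keep pointing
 * at the tail of the buffer and there is room to prepend on the left.
 */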
422 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
428 new_len = p->end - p->start + name_len;
429 if (p->start != p->end)
431 ret = fs_path_ensure_buf(p, new_len);
436 if (p->start != p->end)
438 p->start -= name_len;
439 *prepared = p->start;
441 if (p->start != p->end)
452 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
457 ret = fs_path_prepare_for_add(p, name_len, &prepared);
460 memcpy(prepared, name, name_len);
466 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
471 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
474 memcpy(prepared, p2->start, p2->end - p2->start);
480 static int fs_path_add_from_extent_buffer(struct fs_path *p,
481 struct extent_buffer *eb,
482 unsigned long off, int len)
487 ret = fs_path_prepare_for_add(p, len, &prepared);
491 read_extent_buffer(eb, prepared, off, len);
497 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
501 p->reversed = from->reversed;
504 ret = fs_path_add_path(p, from);
510 static void fs_path_unreverse(struct fs_path *p)
519 len = p->end - p->start;
521 p->end = p->start + len;
522 memmove(p->start, tmp, len + 1);
526 static struct btrfs_path *alloc_path_for_send(void)
528 struct btrfs_path *path;
530 path = btrfs_alloc_path();
533 path->search_commit_root = 1;
534 path->skip_locking = 1;
535 path->need_commit_sem = 1;
539 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
549 ret = vfs_write(filp, (__force const char __user *)buf + pos,
551 /* TODO handle that correctly */
552 /*if (ret == -ERESTARTSYS) {
571 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
573 struct btrfs_tlv_header *hdr;
574 int total_len = sizeof(*hdr) + len;
575 int left = sctx->send_max_size - sctx->send_size;
577 if (unlikely(left < total_len))
580 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
581 hdr->tlv_type = cpu_to_le16(attr);
582 hdr->tlv_len = cpu_to_le16(len);
583 memcpy(hdr + 1, data, len);
584 sctx->send_size += total_len;
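/*
 * Layout sketch (illustrative) of what tlv_put() appends at
 * send_buf + send_size: a btrfs_tlv_header carrying tlv_type and tlv_len as
 * little endian 16 bit values, immediately followed by the len bytes of
 * attribute data:
 *
 *	| tlv_type | tlv_len | data[len] |
 */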
589 #define TLV_PUT_DEFINE_INT(bits) \
590 static int tlv_put_u##bits(struct send_ctx *sctx, \
591 u##bits attr, u##bits value) \
593 __le##bits __tmp = cpu_to_le##bits(value); \
594 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
597 TLV_PUT_DEFINE_INT(64)
599 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
600 const char *str, int len)
604 return tlv_put(sctx, attr, str, len);
607 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
610 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
613 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
614 struct extent_buffer *eb,
615 struct btrfs_timespec *ts)
617 struct btrfs_timespec bts;
618 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
619 return tlv_put(sctx, attr, &bts, sizeof(bts));
623 #define TLV_PUT(sctx, attrtype, attrlen, data) \
625 ret = tlv_put(sctx, attrtype, attrlen, data); \
627 goto tlv_put_failure; \
630 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
632 ret = tlv_put_u##bits(sctx, attrtype, value); \
634 goto tlv_put_failure; \
637 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
638 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
639 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
640 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
641 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
643 ret = tlv_put_string(sctx, attrtype, str, len); \
645 goto tlv_put_failure; \
647 #define TLV_PUT_PATH(sctx, attrtype, p) \
649 ret = tlv_put_string(sctx, attrtype, p->start, \
650 p->end - p->start); \
652 goto tlv_put_failure; \
654 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
656 ret = tlv_put_uuid(sctx, attrtype, uuid); \
658 goto tlv_put_failure; \
660 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
662 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
664 goto tlv_put_failure; \
667 static int send_header(struct send_ctx *sctx)
669 struct btrfs_stream_header hdr;
671 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
672 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
674 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
679 * For each command/item we want to send to userspace, we call this function.
681 static int begin_cmd(struct send_ctx *sctx, int cmd)
683 struct btrfs_cmd_header *hdr;
685 if (WARN_ON(!sctx->send_buf))
688 BUG_ON(sctx->send_size);
690 sctx->send_size += sizeof(*hdr);
691 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
692 hdr->cmd = cpu_to_le16(cmd);
697 static int send_cmd(struct send_ctx *sctx)
700 struct btrfs_cmd_header *hdr;
703 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
704 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
707 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
708 hdr->crc = cpu_to_le32(crc);
710 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
713 sctx->total_send_size += sctx->send_size;
714 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
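/*
 * Stream framing sketch (illustrative): the stream starts with a
 * btrfs_stream_header (magic plus version, written by send_header()),
 * followed by a sequence of commands. Each command consists of a
 * btrfs_cmd_header whose len covers only the TLV payload (send_size minus
 * the header), whose cmd identifies the operation, and whose crc is a
 * crc32c over the whole command buffer (computed with the crc field itself
 * zeroed), followed by the attributes emitted via the TLV_PUT_* macros.
 */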
721 * Sends a move instruction to user space
723 static int send_rename(struct send_ctx *sctx,
724 struct fs_path *from, struct fs_path *to)
726 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
729 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
731 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
735 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
736 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
738 ret = send_cmd(sctx);
746 * Sends a link instruction to user space
748 static int send_link(struct send_ctx *sctx,
749 struct fs_path *path, struct fs_path *lnk)
751 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
754 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
756 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
760 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
761 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
763 ret = send_cmd(sctx);
771 * Sends an unlink instruction to user space
773 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
775 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
778 btrfs_debug(fs_info, "send_unlink %s", path->start);
780 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
784 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
786 ret = send_cmd(sctx);
794 * Sends a rmdir instruction to user space
796 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
798 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
801 btrfs_debug(fs_info, "send_rmdir %s", path->start);
803 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
807 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
809 ret = send_cmd(sctx);
817 * Helper function to retrieve some fields from an inode item.
819 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
820 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
824 struct btrfs_inode_item *ii;
825 struct btrfs_key key;
828 key.type = BTRFS_INODE_ITEM_KEY;
830 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
837 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
838 struct btrfs_inode_item);
840 *size = btrfs_inode_size(path->nodes[0], ii);
842 *gen = btrfs_inode_generation(path->nodes[0], ii);
844 *mode = btrfs_inode_mode(path->nodes[0], ii);
846 *uid = btrfs_inode_uid(path->nodes[0], ii);
848 *gid = btrfs_inode_gid(path->nodes[0], ii);
850 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
855 static int get_inode_info(struct btrfs_root *root,
856 u64 ino, u64 *size, u64 *gen,
857 u64 *mode, u64 *uid, u64 *gid,
860 struct btrfs_path *path;
863 path = alloc_path_for_send();
866 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
868 btrfs_free_path(path);
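/*
 * Callers that need only some of the fields pass NULL for the rest, e.g.
 * (illustrative, mirroring get_cur_inode_state() below):
 *
 *	ret = get_inode_info(root, ino, NULL, &gen, NULL, NULL, NULL, NULL);
 */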
872 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
877 * Helper function to iterate the entries in ONE btrfs_inode_ref or
878 * btrfs_inode_extref.
879 * The iterate callback may return a non-zero value to stop iteration. This can
880 * be a negative value for error codes or 1 to simply stop it.
882 * path must point to the INODE_REF or INODE_EXTREF when called.
884 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
885 struct btrfs_key *found_key, int resolve,
886 iterate_inode_ref_t iterate, void *ctx)
888 struct extent_buffer *eb = path->nodes[0];
889 struct btrfs_item *item;
890 struct btrfs_inode_ref *iref;
891 struct btrfs_inode_extref *extref;
892 struct btrfs_path *tmp_path;
896 int slot = path->slots[0];
903 unsigned long name_off;
904 unsigned long elem_size;
907 p = fs_path_alloc_reversed();
911 tmp_path = alloc_path_for_send();
918 if (found_key->type == BTRFS_INODE_REF_KEY) {
919 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
920 struct btrfs_inode_ref);
921 item = btrfs_item_nr(slot);
922 total = btrfs_item_size(eb, item);
923 elem_size = sizeof(*iref);
925 ptr = btrfs_item_ptr_offset(eb, slot);
926 total = btrfs_item_size_nr(eb, slot);
927 elem_size = sizeof(*extref);
930 while (cur < total) {
933 if (found_key->type == BTRFS_INODE_REF_KEY) {
934 iref = (struct btrfs_inode_ref *)(ptr + cur);
935 name_len = btrfs_inode_ref_name_len(eb, iref);
936 name_off = (unsigned long)(iref + 1);
937 index = btrfs_inode_ref_index(eb, iref);
938 dir = found_key->offset;
940 extref = (struct btrfs_inode_extref *)(ptr + cur);
941 name_len = btrfs_inode_extref_name_len(eb, extref);
942 name_off = (unsigned long)&extref->name;
943 index = btrfs_inode_extref_index(eb, extref);
944 dir = btrfs_inode_extref_parent(eb, extref);
948 start = btrfs_ref_to_path(root, tmp_path, name_len,
952 ret = PTR_ERR(start);
955 if (start < p->buf) {
956 /* overflow, try again with a larger buffer */
957 ret = fs_path_ensure_buf(p,
958 p->buf_len + p->buf - start);
961 start = btrfs_ref_to_path(root, tmp_path,
966 ret = PTR_ERR(start);
969 BUG_ON(start < p->buf);
973 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
979 cur += elem_size + name_len;
980 ret = iterate(num, dir, index, p, ctx);
987 btrfs_free_path(tmp_path);
992 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
993 const char *name, int name_len,
994 const char *data, int data_len,
998 * Helper function to iterate the entries in ONE btrfs_dir_item.
999 * The iterate callback may return a non-zero value to stop iteration. This can
1000 * be a negative value for error codes or 1 to simply stop it.
1002 * path must point to the dir item when called.
1004 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1005 struct btrfs_key *found_key,
1006 iterate_dir_item_t iterate, void *ctx)
1009 struct extent_buffer *eb;
1010 struct btrfs_item *item;
1011 struct btrfs_dir_item *di;
1012 struct btrfs_key di_key;
1025 * Start with a small buffer (1 page). If later we end up needing more
1026 * space, which can happen for xattrs on a fs with a leaf size greater
1027 * than the page size, attempt to increase the buffer. Typically xattr
1031 buf = kmalloc(buf_len, GFP_KERNEL);
1037 eb = path->nodes[0];
1038 slot = path->slots[0];
1039 item = btrfs_item_nr(slot);
1040 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1043 total = btrfs_item_size(eb, item);
1046 while (cur < total) {
1047 name_len = btrfs_dir_name_len(eb, di);
1048 data_len = btrfs_dir_data_len(eb, di);
1049 type = btrfs_dir_type(eb, di);
1050 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1052 if (type == BTRFS_FT_XATTR) {
1053 if (name_len > XATTR_NAME_MAX) {
1054 ret = -ENAMETOOLONG;
1057 if (name_len + data_len >
1058 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1066 if (name_len + data_len > PATH_MAX) {
1067 ret = -ENAMETOOLONG;
1072 if (name_len + data_len > buf_len) {
1073 buf_len = name_len + data_len;
1074 if (is_vmalloc_addr(buf)) {
1078 char *tmp = krealloc(buf, buf_len,
1079 GFP_KERNEL | __GFP_NOWARN);
1086 buf = kvmalloc(buf_len, GFP_KERNEL);
1094 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1095 name_len + data_len);
1097 len = sizeof(*di) + name_len + data_len;
1098 di = (struct btrfs_dir_item *)((char *)di + len);
1101 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1102 data_len, type, ctx);
1118 static int __copy_first_ref(int num, u64 dir, int index,
1119 struct fs_path *p, void *ctx)
1122 struct fs_path *pt = ctx;
1124 ret = fs_path_copy(pt, p);
1128 /* we want the first only */
1133 * Retrieve the first path of an inode. If an inode has more than one
1134 * ref/hardlink, the additional refs are ignored.
1136 static int get_inode_path(struct btrfs_root *root,
1137 u64 ino, struct fs_path *path)
1140 struct btrfs_key key, found_key;
1141 struct btrfs_path *p;
1143 p = alloc_path_for_send();
1147 fs_path_reset(path);
1150 key.type = BTRFS_INODE_REF_KEY;
1153 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1160 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1161 if (found_key.objectid != ino ||
1162 (found_key.type != BTRFS_INODE_REF_KEY &&
1163 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1168 ret = iterate_inode_ref(root, p, &found_key, 1,
1169 __copy_first_ref, path);
1179 struct backref_ctx {
1180 struct send_ctx *sctx;
1182 struct btrfs_path *path;
1183 /* number of total found references */
1187 * used for clones found in send_root. Clones found behind cur_objectid
1188 * and cur_offset are not considered allowed clones.
1193 /* may be truncated in case it's the last extent in a file */
1196 /* data offset in the file extent item */
1199 /* Just to check for bugs in backref resolving */
1203 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1205 u64 root = (u64)(uintptr_t)key;
1206 struct clone_root *cr = (struct clone_root *)elt;
1208 if (root < cr->root->objectid)
1210 if (root > cr->root->objectid)
1215 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1217 struct clone_root *cr1 = (struct clone_root *)e1;
1218 struct clone_root *cr2 = (struct clone_root *)e2;
1220 if (cr1->root->objectid < cr2->root->objectid)
1222 if (cr1->root->objectid > cr2->root->objectid)
1228 * Called for every backref that is found for the current extent.
1229 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1231 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1233 struct backref_ctx *bctx = ctx_;
1234 struct clone_root *found;
1238 /* First check if the root is in the list of accepted clone sources */
1239 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1240 bctx->sctx->clone_roots_cnt,
1241 sizeof(struct clone_root),
1242 __clone_root_cmp_bsearch);
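/*
 * Note (illustrative): the bsearch() above relies on sctx->clone_roots
 * being sorted by root objectid, which is expected to have been done at
 * setup time, e.g. with something like:
 *
 *	sort(sctx->clone_roots, sctx->clone_roots_cnt,
 *	     sizeof(struct clone_root), __clone_root_cmp_sort, NULL);
 */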
1246 if (found->root == bctx->sctx->send_root &&
1247 ino == bctx->cur_objectid &&
1248 offset == bctx->cur_offset) {
1249 bctx->found_itself = 1;
1253 * There are inodes that have extents that lie behind their i_size. Don't
1254 * accept clones from these extents.
1256 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
1258 btrfs_release_path(bctx->path);
1262 if (offset + bctx->data_offset + bctx->extent_len > i_size)
1266 * Make sure we don't consider clones from send_root that are
1267 * behind the current inode/offset.
1269 if (found->root == bctx->sctx->send_root) {
1271 * TODO for the moment we don't accept clones from the inode
1272 * that is currently being sent. We may change this when
1273 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1276 if (ino >= bctx->cur_objectid)
1279 if (ino > bctx->cur_objectid)
1281 if (offset + bctx->extent_len > bctx->cur_offset)
1287 found->found_refs++;
1288 if (ino < found->ino) {
1290 found->offset = offset;
1291 } else if (found->ino == ino) {
1293 * Same extent found more than once in the same file.
1295 if (found->offset > offset + bctx->extent_len)
1296 found->offset = offset;
1303 * Given an inode, offset and extent item, it finds a good clone for a clone
1304 * instruction. Returns -ENOENT when none could be found. The function makes
1305 * sure that the returned clone is usable at the current point of sending.
1306 * This means that no clones are accepted which lie behind the current
1309 * path must point to the extent item when called.
1311 static int find_extent_clone(struct send_ctx *sctx,
1312 struct btrfs_path *path,
1313 u64 ino, u64 data_offset,
1315 struct clone_root **found)
1317 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1323 u64 extent_item_pos;
1325 struct btrfs_file_extent_item *fi;
1326 struct extent_buffer *eb = path->nodes[0];
1327 struct backref_ctx *backref_ctx = NULL;
1328 struct clone_root *cur_clone_root;
1329 struct btrfs_key found_key;
1330 struct btrfs_path *tmp_path;
1334 tmp_path = alloc_path_for_send();
1338 /* We only use this path under the commit sem */
1339 tmp_path->need_commit_sem = 0;
1341 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1347 backref_ctx->path = tmp_path;
1349 if (data_offset >= ino_size) {
1351 * There may be extents that lie behind the file's size.
1352 * I at least had this in combination with snapshotting while
1353 * writing large files.
1359 fi = btrfs_item_ptr(eb, path->slots[0],
1360 struct btrfs_file_extent_item);
1361 extent_type = btrfs_file_extent_type(eb, fi);
1362 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1366 compressed = btrfs_file_extent_compression(eb, fi);
1368 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1369 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1370 if (disk_byte == 0) {
1374 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1376 down_read(&fs_info->commit_root_sem);
1377 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1378 &found_key, &flags);
1379 up_read(&fs_info->commit_root_sem);
1380 btrfs_release_path(tmp_path);
1384 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1390 * Setup the clone roots.
1392 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1393 cur_clone_root = sctx->clone_roots + i;
1394 cur_clone_root->ino = (u64)-1;
1395 cur_clone_root->offset = 0;
1396 cur_clone_root->found_refs = 0;
1399 backref_ctx->sctx = sctx;
1400 backref_ctx->found = 0;
1401 backref_ctx->cur_objectid = ino;
1402 backref_ctx->cur_offset = data_offset;
1403 backref_ctx->found_itself = 0;
1404 backref_ctx->extent_len = num_bytes;
1406 * For non-compressed extents iterate_extent_inodes() gives us extent
1407 * offsets that already take into account the data offset, but not for
1408 * compressed extents, since the offset is logical and not relative to
1409 * the physical extent locations. We must take this into account to
1410 * avoid sending clone offsets that go beyond the source file's size,
1411 * which would result in the clone ioctl failing with -EINVAL on the
1414 if (compressed == BTRFS_COMPRESS_NONE)
1415 backref_ctx->data_offset = 0;
1417 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1420 * The last extent of a file may be too large due to page alignment.
1421 * We need to adjust extent_len in this case so that the checks in
1422 * __iterate_backrefs work.
1424 if (data_offset + num_bytes >= ino_size)
1425 backref_ctx->extent_len = ino_size - data_offset;
1428 * Now collect all backrefs.
1430 if (compressed == BTRFS_COMPRESS_NONE)
1431 extent_item_pos = logical - found_key.objectid;
1433 extent_item_pos = 0;
1434 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1435 extent_item_pos, 1, __iterate_backrefs,
1441 if (!backref_ctx->found_itself) {
1442 /* found a bug in backref code? */
1445 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1446 ino, data_offset, disk_byte, found_key.objectid);
1450 btrfs_debug(fs_info,
1451 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1452 data_offset, ino, num_bytes, logical);
1454 if (!backref_ctx->found)
1455 btrfs_debug(fs_info, "no clones found");
1457 cur_clone_root = NULL;
1458 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1459 if (sctx->clone_roots[i].found_refs) {
1460 if (!cur_clone_root)
1461 cur_clone_root = sctx->clone_roots + i;
1462 else if (sctx->clone_roots[i].root == sctx->send_root)
1463 /* prefer clones from send_root over others */
1464 cur_clone_root = sctx->clone_roots + i;
1469 if (cur_clone_root) {
1470 *found = cur_clone_root;
1477 btrfs_free_path(tmp_path);
1482 static int read_symlink(struct btrfs_root *root,
1484 struct fs_path *dest)
1487 struct btrfs_path *path;
1488 struct btrfs_key key;
1489 struct btrfs_file_extent_item *ei;
1495 path = alloc_path_for_send();
1500 key.type = BTRFS_EXTENT_DATA_KEY;
1502 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1507 * An empty symlink inode. Can happen in rare error paths when
1508 * creating a symlink (transaction committed before the inode
1509 * eviction handler removed the symlink inode items and a crash
1510 * happened in between or the subvol was snapshotted in between).
1511 * Print an informative message to dmesg/syslog so that the user
1512 * can delete the symlink.
1514 btrfs_err(root->fs_info,
1515 "Found empty symlink inode %llu at root %llu",
1516 ino, root->root_key.objectid);
1521 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1522 struct btrfs_file_extent_item);
1523 type = btrfs_file_extent_type(path->nodes[0], ei);
1524 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1525 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1526 BUG_ON(compression);
1528 off = btrfs_file_extent_inline_start(ei);
1529 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
1531 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1534 btrfs_free_path(path);
1539 * Helper function to generate a file name that is unique in the root of
1540 * send_root and parent_root. This is used to generate names for orphan inodes.
1542 static int gen_unique_name(struct send_ctx *sctx,
1544 struct fs_path *dest)
1547 struct btrfs_path *path;
1548 struct btrfs_dir_item *di;
1553 path = alloc_path_for_send();
1558 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1560 ASSERT(len < sizeof(tmp));
1562 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1563 path, BTRFS_FIRST_FREE_OBJECTID,
1564 tmp, strlen(tmp), 0);
1565 btrfs_release_path(path);
1571 /* not unique, try again */
1576 if (!sctx->parent_root) {
1582 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1583 path, BTRFS_FIRST_FREE_OBJECTID,
1584 tmp, strlen(tmp), 0);
1585 btrfs_release_path(path);
1591 /* not unique, try again */
1599 ret = fs_path_add(dest, tmp, strlen(tmp));
1602 btrfs_free_path(path);
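/*
 * Example (illustrative): for inode 260 with generation 7 the names tried
 * by the snprintf() above look like "o260-7-0", "o260-7-1", ... where the
 * last number is presumably a counter bumped on every "not unique, try
 * again" iteration, until a name is found that exists in neither send_root
 * nor parent_root.
 */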
1607 inode_state_no_change,
1608 inode_state_will_create,
1609 inode_state_did_create,
1610 inode_state_will_delete,
1611 inode_state_did_delete,
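/*
 * Illustrative summary of get_cur_inode_state() below: it compares the
 * inode's generation in send_root ("left") and in parent_root ("right")
 * with the given gen and with sctx->send_progress. An inode that matches
 * only on the left side is new: did_create if it was already processed
 * (ino < send_progress), will_create otherwise. One that matches only on
 * the right side is going away: did_delete if already processed,
 * will_delete otherwise. Matching on both sides means no_change.
 */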
1614 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1622 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1624 if (ret < 0 && ret != -ENOENT)
1628 if (!sctx->parent_root) {
1629 right_ret = -ENOENT;
1631 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1632 NULL, NULL, NULL, NULL);
1633 if (ret < 0 && ret != -ENOENT)
1638 if (!left_ret && !right_ret) {
1639 if (left_gen == gen && right_gen == gen) {
1640 ret = inode_state_no_change;
1641 } else if (left_gen == gen) {
1642 if (ino < sctx->send_progress)
1643 ret = inode_state_did_create;
1645 ret = inode_state_will_create;
1646 } else if (right_gen == gen) {
1647 if (ino < sctx->send_progress)
1648 ret = inode_state_did_delete;
1650 ret = inode_state_will_delete;
1654 } else if (!left_ret) {
1655 if (left_gen == gen) {
1656 if (ino < sctx->send_progress)
1657 ret = inode_state_did_create;
1659 ret = inode_state_will_create;
1663 } else if (!right_ret) {
1664 if (right_gen == gen) {
1665 if (ino < sctx->send_progress)
1666 ret = inode_state_did_delete;
1668 ret = inode_state_will_delete;
1680 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1684 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1687 ret = get_cur_inode_state(sctx, ino, gen);
1691 if (ret == inode_state_no_change ||
1692 ret == inode_state_did_create ||
1693 ret == inode_state_will_delete)
1703 * Helper function to lookup a dir item in a dir.
1705 static int lookup_dir_item_inode(struct btrfs_root *root,
1706 u64 dir, const char *name, int name_len,
1711 struct btrfs_dir_item *di;
1712 struct btrfs_key key;
1713 struct btrfs_path *path;
1715 path = alloc_path_for_send();
1719 di = btrfs_lookup_dir_item(NULL, root, path,
1720 dir, name, name_len, 0);
1729 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1730 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1734 *found_inode = key.objectid;
1735 *found_type = btrfs_dir_type(path->nodes[0], di);
1738 btrfs_free_path(path);
1743 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1744 * generation of the parent dir and the name of the dir entry.
1746 static int get_first_ref(struct btrfs_root *root, u64 ino,
1747 u64 *dir, u64 *dir_gen, struct fs_path *name)
1750 struct btrfs_key key;
1751 struct btrfs_key found_key;
1752 struct btrfs_path *path;
1756 path = alloc_path_for_send();
1761 key.type = BTRFS_INODE_REF_KEY;
1764 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1768 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1770 if (ret || found_key.objectid != ino ||
1771 (found_key.type != BTRFS_INODE_REF_KEY &&
1772 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1777 if (found_key.type == BTRFS_INODE_REF_KEY) {
1778 struct btrfs_inode_ref *iref;
1779 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1780 struct btrfs_inode_ref);
1781 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1782 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1783 (unsigned long)(iref + 1),
1785 parent_dir = found_key.offset;
1787 struct btrfs_inode_extref *extref;
1788 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1789 struct btrfs_inode_extref);
1790 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1791 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1792 (unsigned long)&extref->name, len);
1793 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1797 btrfs_release_path(path);
1800 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1809 btrfs_free_path(path);
1813 static int is_first_ref(struct btrfs_root *root,
1815 const char *name, int name_len)
1818 struct fs_path *tmp_name;
1821 tmp_name = fs_path_alloc();
1825 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1829 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1834 ret = !memcmp(tmp_name->start, name, name_len);
1837 fs_path_free(tmp_name);
1842 * Used by process_recorded_refs to determine if a new ref would overwrite an
1843 * already existing ref. In case it detects an overwrite, it returns the
1844 * inode/gen in who_ino/who_gen.
1845 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1846 * to make sure later references to the overwritten inode are possible.
1847 * Orphanizing is however only required for the first ref of an inode.
1848 * process_recorded_refs does an additional is_first_ref check to see if
1849 * orphanizing is really required.
1851 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1852 const char *name, int name_len,
1853 u64 *who_ino, u64 *who_gen)
1857 u64 other_inode = 0;
1860 if (!sctx->parent_root)
1863 ret = is_inode_existent(sctx, dir, dir_gen);
1868 * If we have a parent root we need to verify that the parent dir was
1869 * not deleted and then re-created, if it was then we have no overwrite
1870 * and we can just unlink this entry.
1872 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1873 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1875 if (ret < 0 && ret != -ENOENT)
1885 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1886 &other_inode, &other_type);
1887 if (ret < 0 && ret != -ENOENT)
1895 * Check if the overwritten ref was already processed. If yes, the ref
1896 * was already unlinked/moved, so we can safely assume that we will not
1897 * overwrite anything at this point in time.
1899 if (other_inode > sctx->send_progress ||
1900 is_waiting_for_move(sctx, other_inode)) {
1901 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1902 who_gen, NULL, NULL, NULL, NULL);
1907 *who_ino = other_inode;
1917 * Checks if the ref was overwritten by an already processed inode. This is
1918 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1919 * thus the orphan name needs to be used.
1920 * process_recorded_refs also uses it to avoid unlinking of refs that were
1923 static int did_overwrite_ref(struct send_ctx *sctx,
1924 u64 dir, u64 dir_gen,
1925 u64 ino, u64 ino_gen,
1926 const char *name, int name_len)
1933 if (!sctx->parent_root)
1936 ret = is_inode_existent(sctx, dir, dir_gen);
1940 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1941 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1943 if (ret < 0 && ret != -ENOENT)
1953 /* check if the ref was overwritten by another ref */
1954 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1955 &ow_inode, &other_type);
1956 if (ret < 0 && ret != -ENOENT)
1959 /* was never and will never be overwritten */
1964 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1969 if (ow_inode == ino && gen == ino_gen) {
1975 * We know that it is or will be overwritten. Check this now.
1976 * The current inode being processed might have been the one that caused
1977 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1978 * the current inode being processed.
1980 if ((ow_inode < sctx->send_progress) ||
1981 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1982 gen == sctx->cur_inode_gen))
1992 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1993 * that got overwritten. This is used by process_recorded_refs to determine
1994 * if it has to use the path as returned by get_cur_path or the orphan name.
1996 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1999 struct fs_path *name = NULL;
2003 if (!sctx->parent_root)
2006 name = fs_path_alloc();
2010 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2014 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2015 name->start, fs_path_len(name));
2023 * Insert a name cache entry. On 32 bit kernels the radix tree index is 32 bit,
2024 * so we need to do some special handling in case we have clashes. This function
2025 * takes care of this with the help of name_cache_entry::radix_list.
2026 * In case of error, nce is kfreed.
2028 static int name_cache_insert(struct send_ctx *sctx,
2029 struct name_cache_entry *nce)
2032 struct list_head *nce_head;
2034 nce_head = radix_tree_lookup(&sctx->name_cache,
2035 (unsigned long)nce->ino);
2037 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2042 INIT_LIST_HEAD(nce_head);
2044 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2051 list_add_tail(&nce->radix_list, nce_head);
2052 list_add_tail(&nce->list, &sctx->name_cache_list);
2053 sctx->name_cache_size++;
2058 static void name_cache_delete(struct send_ctx *sctx,
2059 struct name_cache_entry *nce)
2061 struct list_head *nce_head;
2063 nce_head = radix_tree_lookup(&sctx->name_cache,
2064 (unsigned long)nce->ino);
2066 btrfs_err(sctx->send_root->fs_info,
2067 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2068 nce->ino, sctx->name_cache_size);
2071 list_del(&nce->radix_list);
2072 list_del(&nce->list);
2073 sctx->name_cache_size--;
2076 * We may not get to the final release of nce_head if the lookup fails
2078 if (nce_head && list_empty(nce_head)) {
2079 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2084 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2087 struct list_head *nce_head;
2088 struct name_cache_entry *cur;
2090 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2094 list_for_each_entry(cur, nce_head, radix_list) {
2095 if (cur->ino == ino && cur->gen == gen)
2102 * Removes the entry from the list and adds it back to the end. This marks the
2103 * entry as recently used so that name_cache_clean_unused does not remove it.
2105 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2107 list_del(&nce->list);
2108 list_add_tail(&nce->list, &sctx->name_cache_list);
2112 * Remove some entries from the beginning of name_cache_list.
2114 static void name_cache_clean_unused(struct send_ctx *sctx)
2116 struct name_cache_entry *nce;
2118 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2121 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2122 nce = list_entry(sctx->name_cache_list.next,
2123 struct name_cache_entry, list);
2124 name_cache_delete(sctx, nce);
2129 static void name_cache_free(struct send_ctx *sctx)
2131 struct name_cache_entry *nce;
2133 while (!list_empty(&sctx->name_cache_list)) {
2134 nce = list_entry(sctx->name_cache_list.next,
2135 struct name_cache_entry, list);
2136 name_cache_delete(sctx, nce);
2142 * Used by get_cur_path for each ref up to the root.
2143 * Returns 0 if it succeeded.
2144 * Returns 1 if the inode does not exist or got overwritten. In that case, the
2145 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2146 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2147 * Returns <0 in case of error.
2149 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2153 struct fs_path *dest)
2157 struct name_cache_entry *nce = NULL;
2160 * First check if we already did a call to this function with the same
2161 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2162 * return the cached result.
2164 nce = name_cache_search(sctx, ino, gen);
2166 if (ino < sctx->send_progress && nce->need_later_update) {
2167 name_cache_delete(sctx, nce);
2171 name_cache_used(sctx, nce);
2172 *parent_ino = nce->parent_ino;
2173 *parent_gen = nce->parent_gen;
2174 ret = fs_path_add(dest, nce->name, nce->name_len);
2183 * If the inode does not exist yet, add the orphan name and return 1.
2184 * This should only happen for the parent dir that we determine in
2187 ret = is_inode_existent(sctx, ino, gen);
2192 ret = gen_unique_name(sctx, ino, gen, dest);
2200 * Depending on whether the inode was already processed or not, use
2201 * send_root or parent_root for ref lookup.
2203 if (ino < sctx->send_progress)
2204 ret = get_first_ref(sctx->send_root, ino,
2205 parent_ino, parent_gen, dest);
2207 ret = get_first_ref(sctx->parent_root, ino,
2208 parent_ino, parent_gen, dest);
2213 * Check if the ref was overwritten by an inode's ref that was processed
2214 * earlier. If yes, treat as orphan and return 1.
2216 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2217 dest->start, dest->end - dest->start);
2221 fs_path_reset(dest);
2222 ret = gen_unique_name(sctx, ino, gen, dest);
2230 * Store the result of the lookup in the name cache.
2232 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2240 nce->parent_ino = *parent_ino;
2241 nce->parent_gen = *parent_gen;
2242 nce->name_len = fs_path_len(dest);
2244 strcpy(nce->name, dest->start);
2246 if (ino < sctx->send_progress)
2247 nce->need_later_update = 0;
2249 nce->need_later_update = 1;
2251 nce_ret = name_cache_insert(sctx, nce);
2254 name_cache_clean_unused(sctx);
2261 * Magic happens here. This function returns the first ref to an inode as it
2262 * would look while receiving the stream at this point in time.
2263 * We walk the path up to the root. For every inode in between, we check if it
2264 * was already processed/sent. If yes, we continue with the parent as found
2265 * in send_root. If not, we continue with the parent as found in parent_root.
2266 * If we encounter an inode that was deleted at this point in time, we use the
2267 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2268 * that were not created yet and overwritten inodes/refs.
2270 * When do we have orphan inodes:
2271 * 1. When an inode is freshly created and thus no valid refs are available yet
2272 * 2. When a directory lost all its refs (deleted) but still has dir items
2273 * inside which were not processed yet (pending for move/delete). If anyone
2274 * tried to get the path to the dir items, it would get a path inside that
2276 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2277 * of an unprocessed inode. If in that case the first ref would be
2278 * overwritten, the overwritten inode gets "orphanized". Later when we
2279 * process this overwritten inode, it is restored at a new place by moving
2282 * sctx->send_progress tells this function at which point in time receiving
2285 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2286 struct fs_path *dest)
2289 struct fs_path *name = NULL;
2290 u64 parent_inode = 0;
2294 name = fs_path_alloc();
2301 fs_path_reset(dest);
2303 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2304 struct waiting_dir_move *wdm;
2306 fs_path_reset(name);
2308 if (is_waiting_for_rm(sctx, ino)) {
2309 ret = gen_unique_name(sctx, ino, gen, name);
2312 ret = fs_path_add_path(dest, name);
2316 wdm = get_waiting_dir_move(sctx, ino);
2317 if (wdm && wdm->orphanized) {
2318 ret = gen_unique_name(sctx, ino, gen, name);
2321 ret = get_first_ref(sctx->parent_root, ino,
2322 &parent_inode, &parent_gen, name);
2324 ret = __get_cur_name_and_parent(sctx, ino, gen,
2334 ret = fs_path_add_path(dest, name);
2345 fs_path_unreverse(dest);
2350 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2352 static int send_subvol_begin(struct send_ctx *sctx)
2355 struct btrfs_root *send_root = sctx->send_root;
2356 struct btrfs_root *parent_root = sctx->parent_root;
2357 struct btrfs_path *path;
2358 struct btrfs_key key;
2359 struct btrfs_root_ref *ref;
2360 struct extent_buffer *leaf;
2364 path = btrfs_alloc_path();
2368 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2370 btrfs_free_path(path);
2374 key.objectid = send_root->objectid;
2375 key.type = BTRFS_ROOT_BACKREF_KEY;
2378 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2387 leaf = path->nodes[0];
2388 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2389 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2390 key.objectid != send_root->objectid) {
2394 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2395 namelen = btrfs_root_ref_name_len(leaf, ref);
2396 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2397 btrfs_release_path(path);
2400 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2404 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2409 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2411 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2412 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2413 sctx->send_root->root_item.received_uuid);
2415 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2416 sctx->send_root->root_item.uuid);
2418 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2419 le64_to_cpu(sctx->send_root->root_item.ctransid));
2421 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2422 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2423 parent_root->root_item.received_uuid);
2425 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2426 parent_root->root_item.uuid);
2427 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2428 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2431 ret = send_cmd(sctx);
2435 btrfs_free_path(path);
2440 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2442 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2446 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2448 p = fs_path_alloc();
2452 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2456 ret = get_cur_path(sctx, ino, gen, p);
2459 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2460 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2462 ret = send_cmd(sctx);
2470 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2472 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2476 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2478 p = fs_path_alloc();
2482 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2486 ret = get_cur_path(sctx, ino, gen, p);
2489 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2490 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2492 ret = send_cmd(sctx);
2500 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2502 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2506 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2509 p = fs_path_alloc();
2513 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2517 ret = get_cur_path(sctx, ino, gen, p);
2520 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2521 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2522 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2524 ret = send_cmd(sctx);
2532 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2534 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2536 struct fs_path *p = NULL;
2537 struct btrfs_inode_item *ii;
2538 struct btrfs_path *path = NULL;
2539 struct extent_buffer *eb;
2540 struct btrfs_key key;
2543 btrfs_debug(fs_info, "send_utimes %llu", ino);
2545 p = fs_path_alloc();
2549 path = alloc_path_for_send();
2556 key.type = BTRFS_INODE_ITEM_KEY;
2558 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2564 eb = path->nodes[0];
2565 slot = path->slots[0];
2566 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2568 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2572 ret = get_cur_path(sctx, ino, gen, p);
2575 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2576 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2577 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2578 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2579 /* TODO Add otime support when the otime patches get into upstream */
2581 ret = send_cmd(sctx);
2586 btrfs_free_path(path);
2591 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2592 * a valid path yet because we did not process the refs yet. So, the inode
2593 * is created as an orphan.
2595 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2597 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2605 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2607 p = fs_path_alloc();
2611 if (ino != sctx->cur_ino) {
2612 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2617 gen = sctx->cur_inode_gen;
2618 mode = sctx->cur_inode_mode;
2619 rdev = sctx->cur_inode_rdev;
2622 if (S_ISREG(mode)) {
2623 cmd = BTRFS_SEND_C_MKFILE;
2624 } else if (S_ISDIR(mode)) {
2625 cmd = BTRFS_SEND_C_MKDIR;
2626 } else if (S_ISLNK(mode)) {
2627 cmd = BTRFS_SEND_C_SYMLINK;
2628 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2629 cmd = BTRFS_SEND_C_MKNOD;
2630 } else if (S_ISFIFO(mode)) {
2631 cmd = BTRFS_SEND_C_MKFIFO;
2632 } else if (S_ISSOCK(mode)) {
2633 cmd = BTRFS_SEND_C_MKSOCK;
2635 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2636 (int)(mode & S_IFMT));
2641 ret = begin_cmd(sctx, cmd);
2645 ret = gen_unique_name(sctx, ino, gen, p);
2649 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2650 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2652 if (S_ISLNK(mode)) {
2654 ret = read_symlink(sctx->send_root, ino, p);
2657 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2658 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2659 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2660 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2661 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2664 ret = send_cmd(sctx);
2676 * We need some special handling for inodes that get processed before the parent
2677 * directory is created. See process_recorded_refs for details.
2678 * This function checks if we already created the dir out of order.
2680 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2683 struct btrfs_path *path = NULL;
2684 struct btrfs_key key;
2685 struct btrfs_key found_key;
2686 struct btrfs_key di_key;
2687 struct extent_buffer *eb;
2688 struct btrfs_dir_item *di;
2691 path = alloc_path_for_send();
2698 key.type = BTRFS_DIR_INDEX_KEY;
2700 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2705 eb = path->nodes[0];
2706 slot = path->slots[0];
2707 if (slot >= btrfs_header_nritems(eb)) {
2708 ret = btrfs_next_leaf(sctx->send_root, path);
2711 } else if (ret > 0) {
2718 btrfs_item_key_to_cpu(eb, &found_key, slot);
2719 if (found_key.objectid != key.objectid ||
2720 found_key.type != key.type) {
2725 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2726 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2728 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2729 di_key.objectid < sctx->send_progress) {
2738 btrfs_free_path(path);
2743 * Only creates the inode if it is:
2744 * 1. Not a directory
2745 * 2. Or a directory which was not created already due to out of order
2746 * directories. See did_create_dir and process_recorded_refs for details.
2748 static int send_create_inode_if_needed(struct send_ctx *sctx)
2752 if (S_ISDIR(sctx->cur_inode_mode)) {
2753 ret = did_create_dir(sctx, sctx->cur_ino);
2762 ret = send_create_inode(sctx, sctx->cur_ino);
2770 struct recorded_ref {
2771 struct list_head list;
2773 struct fs_path *full_path;
2780 * We need to process new refs before deleted refs, but compare_tree gives us
2781 * everything mixed. So we first record all refs and later process them.
2782 * This function is a helper to record one ref.
2784 static int __record_ref(struct list_head *head, u64 dir,
2785 u64 dir_gen, struct fs_path *path)
2787 struct recorded_ref *ref;
2789 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2794 ref->dir_gen = dir_gen;
2795 ref->full_path = path;
2797 ref->name = (char *)kbasename(ref->full_path->start);
2798 ref->name_len = ref->full_path->end - ref->name;
2800 list_add_tail(&ref->list, head);
2804 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2806 struct recorded_ref *new;
2808 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2812 new->dir = ref->dir;
2813 new->dir_gen = ref->dir_gen;
2814 new->full_path = NULL;
2815 INIT_LIST_HEAD(&new->list);
2816 list_add_tail(&new->list, list);
2820 static void __free_recorded_refs(struct list_head *head)
2822 struct recorded_ref *cur;
2824 while (!list_empty(head)) {
2825 cur = list_entry(head->next, struct recorded_ref, list);
2826 fs_path_free(cur->full_path);
2827 list_del(&cur->list);
2832 static void free_recorded_refs(struct send_ctx *sctx)
2834 __free_recorded_refs(&sctx->new_refs);
2835 __free_recorded_refs(&sctx->deleted_refs);
2839 * Renames/moves a file/dir to its orphan name. Used when the first
2840 * ref of an unprocessed inode gets overwritten and for all non-empty
2843 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2844 struct fs_path *path)
2847 struct fs_path *orphan;
2849 orphan = fs_path_alloc();
2853 ret = gen_unique_name(sctx, ino, gen, orphan);
2857 ret = send_rename(sctx, path, orphan);
2860 fs_path_free(orphan);
2864 static struct orphan_dir_info *
2865 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2867 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2868 struct rb_node *parent = NULL;
2869 struct orphan_dir_info *entry, *odi;
2871 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2873 return ERR_PTR(-ENOMEM);
2879 entry = rb_entry(parent, struct orphan_dir_info, node);
2880 if (dir_ino < entry->ino) {
2882 } else if (dir_ino > entry->ino) {
2883 p = &(*p)->rb_right;
2890 rb_link_node(&odi->node, parent, p);
2891 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2895 static struct orphan_dir_info *
2896 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2898 struct rb_node *n = sctx->orphan_dirs.rb_node;
2899 struct orphan_dir_info *entry;
2902 entry = rb_entry(n, struct orphan_dir_info, node);
2903 if (dir_ino < entry->ino)
2905 else if (dir_ino > entry->ino)
2913 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2915 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2920 static void free_orphan_dir_info(struct send_ctx *sctx,
2921 struct orphan_dir_info *odi)
2925 rb_erase(&odi->node, &sctx->orphan_dirs);
2930 * Returns 1 if a directory can be removed at this point in time.
2931 * We check this by iterating all dir items and checking if the inode behind
2932 * the dir item was already processed.
2934 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2938 struct btrfs_root *root = sctx->parent_root;
2939 struct btrfs_path *path;
2940 struct btrfs_key key;
2941 struct btrfs_key found_key;
2942 struct btrfs_key loc;
2943 struct btrfs_dir_item *di;
2946 * Don't try to rmdir the top/root subvolume dir.
2948 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2951 path = alloc_path_for_send();
2956 key.type = BTRFS_DIR_INDEX_KEY;
2958 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2963 struct waiting_dir_move *dm;
2965 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2966 ret = btrfs_next_leaf(root, path);
2973 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2975 if (found_key.objectid != key.objectid ||
2976 found_key.type != key.type)
2979 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2980 struct btrfs_dir_item);
2981 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2983 dm = get_waiting_dir_move(sctx, loc.objectid);
2985 struct orphan_dir_info *odi;
2987 odi = add_orphan_dir_info(sctx, dir);
2993 dm->rmdir_ino = dir;
2998 if (loc.objectid > send_progress) {
2999 struct orphan_dir_info *odi;
3001 odi = get_orphan_dir_info(sctx, dir);
3002 free_orphan_dir_info(sctx, odi);
3013 btrfs_free_path(path);
3017 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3019 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3021 return entry != NULL;
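/*
 * Remember that directory 'ino' has its rename/move delayed. Entries are
 * kept in the sctx->waiting_dir_moves rb-tree, keyed by inode number;
 * 'orphanized' records whether the inode currently lives under its orphan
 * name on the receiving side.
 */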
3024 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3026 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3027 struct rb_node *parent = NULL;
3028 struct waiting_dir_move *entry, *dm;
3030 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3035 dm->orphanized = orphanized;
3039 entry = rb_entry(parent, struct waiting_dir_move, node);
3040 if (ino < entry->ino) {
3042 } else if (ino > entry->ino) {
3043 p = &(*p)->rb_right;
3050 rb_link_node(&dm->node, parent, p);
3051 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3055 static struct waiting_dir_move *
3056 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3058 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3059 struct waiting_dir_move *entry;
3062 entry = rb_entry(n, struct waiting_dir_move, node);
3063 if (ino < entry->ino)
3065 else if (ino > entry->ino)
3073 static void free_waiting_dir_move(struct send_ctx *sctx,
3074 struct waiting_dir_move *dm)
3078 rb_erase(&dm->node, &sctx->waiting_dir_moves);
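/*
 * Queue a rename/move of a directory that can only be performed after the
 * directory 'parent_ino' has been processed. Pending moves are grouped per
 * parent inode in the sctx->pending_dir_moves rb-tree and replayed by
 * apply_children_dir_moves() once that parent was dealt with.
 */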
3082 static int add_pending_dir_move(struct send_ctx *sctx,
3086 struct list_head *new_refs,
3087 struct list_head *deleted_refs,
3088 const bool is_orphan)
3090 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3091 struct rb_node *parent = NULL;
3092 struct pending_dir_move *entry = NULL, *pm;
3093 struct recorded_ref *cur;
3097 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3100 pm->parent_ino = parent_ino;
3103 INIT_LIST_HEAD(&pm->list);
3104 INIT_LIST_HEAD(&pm->update_refs);
3105 RB_CLEAR_NODE(&pm->node);
3109 entry = rb_entry(parent, struct pending_dir_move, node);
3110 if (parent_ino < entry->parent_ino) {
3112 } else if (parent_ino > entry->parent_ino) {
3113 p = &(*p)->rb_right;
3120 list_for_each_entry(cur, deleted_refs, list) {
3121 ret = dup_ref(cur, &pm->update_refs);
3125 list_for_each_entry(cur, new_refs, list) {
3126 ret = dup_ref(cur, &pm->update_refs);
3131 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3136 list_add_tail(&pm->list, &entry->list);
3138 rb_link_node(&pm->node, parent, p);
3139 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3144 __free_recorded_refs(&pm->update_refs);
3150 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3153 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3154 struct pending_dir_move *entry;
3157 entry = rb_entry(n, struct pending_dir_move, node);
3158 if (parent_ino < entry->parent_ino)
3160 else if (parent_ino > entry->parent_ino)
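/*
 * Check whether building the current path of 'ino' would loop, i.e. whether
 * walking up towards the subvolume root comes back to the inode we started
 * from because some ancestor still waits for its rename/move. If so, a
 * nonzero value is returned and *ancestor_ino is set to an ancestor whose
 * move must happen first.
 */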
3168 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3169 u64 ino, u64 gen, u64 *ancestor_ino)
3172 u64 parent_inode = 0;
3174 u64 start_ino = ino;
3177 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3178 fs_path_reset(name);
3180 if (is_waiting_for_rm(sctx, ino))
3182 if (is_waiting_for_move(sctx, ino)) {
3183 if (*ancestor_ino == 0)
3184 *ancestor_ino = ino;
3185 ret = get_first_ref(sctx->parent_root, ino,
3186 &parent_inode, &parent_gen, name);
3188 ret = __get_cur_name_and_parent(sctx, ino, gen,
3198 if (parent_inode == start_ino) {
3200 if (*ancestor_ino == 0)
3201 *ancestor_ino = ino;
3210 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3212 struct fs_path *from_path = NULL;
3213 struct fs_path *to_path = NULL;
3214 struct fs_path *name = NULL;
3215 u64 orig_progress = sctx->send_progress;
3216 struct recorded_ref *cur;
3217 u64 parent_ino, parent_gen;
3218 struct waiting_dir_move *dm = NULL;
3224 name = fs_path_alloc();
3225 from_path = fs_path_alloc();
3226 if (!name || !from_path) {
3231 dm = get_waiting_dir_move(sctx, pm->ino);
3233 rmdir_ino = dm->rmdir_ino;
3234 is_orphan = dm->orphanized;
3235 free_waiting_dir_move(sctx, dm);
3238 ret = gen_unique_name(sctx, pm->ino,
3239 pm->gen, from_path);
3241 ret = get_first_ref(sctx->parent_root, pm->ino,
3242 &parent_ino, &parent_gen, name);
3245 ret = get_cur_path(sctx, parent_ino, parent_gen,
3249 ret = fs_path_add_path(from_path, name);
3254 sctx->send_progress = sctx->cur_ino + 1;
3255 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3259 LIST_HEAD(deleted_refs);
3260 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3261 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3262 &pm->update_refs, &deleted_refs,
3267 dm = get_waiting_dir_move(sctx, pm->ino);
3269 dm->rmdir_ino = rmdir_ino;
3273 fs_path_reset(name);
3276 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3280 ret = send_rename(sctx, from_path, to_path);
3285 struct orphan_dir_info *odi;
3287 odi = get_orphan_dir_info(sctx, rmdir_ino);
3289 /* already deleted */
3292 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
3298 name = fs_path_alloc();
3303 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3306 ret = send_rmdir(sctx, name);
3309 free_orphan_dir_info(sctx, odi);
3313 ret = send_utimes(sctx, pm->ino, pm->gen);
3318 * After rename/move, need to update the utimes of both new parent(s)
3319 * and old parent(s).
3321 list_for_each_entry(cur, &pm->update_refs, list) {
3323 * The parent inode might have been deleted in the send snapshot
3325 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3326 NULL, NULL, NULL, NULL, NULL);
3327 if (ret == -ENOENT) {
3334 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3341 fs_path_free(from_path);
3342 fs_path_free(to_path);
3343 sctx->send_progress = orig_progress;
3348 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3350 if (!list_empty(&m->list))
3352 if (!RB_EMPTY_NODE(&m->node))
3353 rb_erase(&m->node, &sctx->pending_dir_moves);
3354 __free_recorded_refs(&m->update_refs);
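/*
 * Append a pending dir move, together with all moves that were chained to
 * it, to the tail of the stack processed by apply_children_dir_moves().
 */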
3358 static void tail_append_pending_moves(struct pending_dir_move *moves,
3359 struct list_head *stack)
3361 if (list_empty(&moves->list)) {
3362 list_add_tail(&moves->list, stack);
3365 list_splice_init(&moves->list, &list);
3366 list_add_tail(&moves->list, stack);
3367 list_splice_tail(&list, stack);
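/*
 * After the current inode was processed, replay all directory moves that had
 * to wait for it, and recursively any moves that in turn waited for those.
 */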
3371 static int apply_children_dir_moves(struct send_ctx *sctx)
3373 struct pending_dir_move *pm;
3374 struct list_head stack;
3375 u64 parent_ino = sctx->cur_ino;
3378 pm = get_pending_dir_moves(sctx, parent_ino);
3382 INIT_LIST_HEAD(&stack);
3383 tail_append_pending_moves(pm, &stack);
3385 while (!list_empty(&stack)) {
3386 pm = list_first_entry(&stack, struct pending_dir_move, list);
3387 parent_ino = pm->ino;
3388 ret = apply_dir_move(sctx, pm);
3389 free_pending_move(sctx, pm);
3392 pm = get_pending_dir_moves(sctx, parent_ino);
3394 tail_append_pending_moves(pm, &stack);
3399 while (!list_empty(&stack)) {
3400 pm = list_first_entry(&stack, struct pending_dir_move, list);
3401 free_pending_move(sctx, pm);
3407 * We might need to delay a directory rename even when no ancestor directory
3408 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3409 * renamed. This happens when we rename a directory to the old name (the name
3410 * in the parent root) of some other unrelated directory that got its rename
3411 * delayed due to some ancestor with a higher inode number that got renamed.
3417 * |---- a/ (ino 257)
3418 * | |---- file (ino 260)
3420 * |---- b/ (ino 258)
3421 * |---- c/ (ino 259)
3425 * |---- a/ (ino 258)
3426 * |---- x/ (ino 259)
3427 * |---- y/ (ino 257)
3428 * |----- file (ino 260)
3430 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3431 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3432 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3435 * 1 - rename 259 from 'c' to 'x'
3436 * 2 - rename 257 from 'a' to 'x/y'
3437 * 3 - rename 258 from 'b' to 'a'
3439 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3440 * be done right away and < 0 on error.
3442 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3443 struct recorded_ref *parent_ref,
3444 const bool is_orphan)
3446 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3447 struct btrfs_path *path;
3448 struct btrfs_key key;
3449 struct btrfs_key di_key;
3450 struct btrfs_dir_item *di;
3454 struct waiting_dir_move *wdm;
3456 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3459 path = alloc_path_for_send();
3463 key.objectid = parent_ref->dir;
3464 key.type = BTRFS_DIR_ITEM_KEY;
3465 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3467 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3470 } else if (ret > 0) {
3475 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3476 parent_ref->name_len);
3482 * di_key.objectid has the number of the inode that has a dentry in the
3483 * parent directory with the same name that sctx->cur_ino is being
3484 * renamed to. We need to check if that inode is in the send root as
3485 * well and if it is currently marked as an inode with a pending rename,
3486 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3487 * that it happens after that other inode is renamed.
3489 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3490 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3495 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3496 &left_gen, NULL, NULL, NULL, NULL);
3499 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3500 &right_gen, NULL, NULL, NULL, NULL);
3507 /* Different inode, no need to delay the rename of sctx->cur_ino */
3508 if (right_gen != left_gen) {
3513 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3514 if (wdm && !wdm->orphanized) {
3515 ret = add_pending_dir_move(sctx,
3517 sctx->cur_inode_gen,
3520 &sctx->deleted_refs,
3526 btrfs_free_path(path);
3531 * Check if inode ino1 is an ancestor of inode ino2 in the given root.
3532 * Return 1 if true, 0 if false and < 0 on error.
3534 static int is_ancestor(struct btrfs_root *root,
3538 struct fs_path *fs_path)
3541 bool free_path = false;
3545 fs_path = fs_path_alloc();
3551 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3555 fs_path_reset(fs_path);
3556 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3558 if (ret == -ENOENT && ino == ino2)
3562 if (parent == ino1) {
3563 ret = parent_gen == ino1_gen ? 1 : 0;
3570 fs_path_free(fs_path);
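/*
 * Check if the rename/move of the current inode has to wait until one of its
 * ancestors (in the send root) is renamed/moved first. If so, the move is
 * queued with add_pending_dir_move() and 1 is returned; 0 means it can be
 * done right away and < 0 means an error occurred.
 */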
3574 static int wait_for_parent_move(struct send_ctx *sctx,
3575 struct recorded_ref *parent_ref,
3576 const bool is_orphan)
3579 u64 ino = parent_ref->dir;
3580 u64 ino_gen = parent_ref->dir_gen;
3581 u64 parent_ino_before, parent_ino_after;
3582 struct fs_path *path_before = NULL;
3583 struct fs_path *path_after = NULL;
3586 path_after = fs_path_alloc();
3587 path_before = fs_path_alloc();
3588 if (!path_after || !path_before) {
3594 * Our current directory inode may not yet be renamed/moved because some
3595 * ancestor (immediate or not) has to be renamed/moved first. So check if
3596 * such an ancestor exists and make sure our own rename/move happens after
3597 * that ancestor is processed to avoid path build infinite loops (done
3598 * at get_cur_path()).
3600 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3601 u64 parent_ino_after_gen;
3603 if (is_waiting_for_move(sctx, ino)) {
3605 * If the current inode is an ancestor of ino in the
3606 * parent root, we need to delay the rename of the
3607 * current inode, otherwise don't delay the rename
3608 * because we can end up with a circular dependency
3609 * of renames, resulting in some directories never
3610 * getting the respective rename operations issued in
3611 * the send stream or getting into infinite path build
3614 ret = is_ancestor(sctx->parent_root,
3615 sctx->cur_ino, sctx->cur_inode_gen,
3621 fs_path_reset(path_before);
3622 fs_path_reset(path_after);
3624 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3625 &parent_ino_after_gen, path_after);
3628 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3630 if (ret < 0 && ret != -ENOENT) {
3632 } else if (ret == -ENOENT) {
3637 len1 = fs_path_len(path_before);
3638 len2 = fs_path_len(path_after);
3639 if (ino > sctx->cur_ino &&
3640 (parent_ino_before != parent_ino_after || len1 != len2 ||
3641 memcmp(path_before->start, path_after->start, len1))) {
3644 ret = get_inode_info(sctx->parent_root, ino, NULL,
3645 &parent_ino_gen, NULL, NULL, NULL,
3649 if (ino_gen == parent_ino_gen) {
3654 ino = parent_ino_after;
3655 ino_gen = parent_ino_after_gen;
3659 fs_path_free(path_before);
3660 fs_path_free(path_after);
3663 ret = add_pending_dir_move(sctx,
3665 sctx->cur_inode_gen,
3668 &sctx->deleted_refs,
3678 * This does all the move/link/unlink/rmdir magic.
3680 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3682 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3684 struct recorded_ref *cur;
3685 struct recorded_ref *cur2;
3686 struct list_head check_dirs;
3687 struct fs_path *valid_path = NULL;
3690 int did_overwrite = 0;
3692 u64 last_dir_ino_rm = 0;
3693 bool can_rename = true;
3695 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3698 * This should never happen as the root dir always has the same ref
3699 * which is always '..'
3701 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3702 INIT_LIST_HEAD(&check_dirs);
3704 valid_path = fs_path_alloc();
3711 * First, check if the first ref of the current inode was overwritten
3712 * before. If yes, we know that the current inode was already orphanized
3713 * and thus use the orphan name. If not, we can use get_cur_path to
3714 * get the path of the first ref as it would look like while receiving at
3715 * this point in time.
3716 * New inodes are always orphan at the beginning, so force to use the
3717 * orphan name in this case.
3718 * The first ref is stored in valid_path and will be updated if it
3719 * gets moved around.
3721 if (!sctx->cur_inode_new) {
3722 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3723 sctx->cur_inode_gen);
3729 if (sctx->cur_inode_new || did_overwrite) {
3730 ret = gen_unique_name(sctx, sctx->cur_ino,
3731 sctx->cur_inode_gen, valid_path);
3736 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3742 list_for_each_entry(cur, &sctx->new_refs, list) {
3744 * We may have refs where the parent directory does not exist
3745 * yet. This happens if the parent directory's inum is higher than
3746 * the current inum. To handle this case, we create the
3747 * parent directory out of order. But we need to check if this
3748 * did already happen before due to other refs in the same dir.
3750 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3753 if (ret == inode_state_will_create) {
3756 * First check if any of the current inode's refs did
3757 * already create the dir.
3759 list_for_each_entry(cur2, &sctx->new_refs, list) {
3762 if (cur2->dir == cur->dir) {
3769 * If that did not happen, check if a previous inode
3770 * did already create the dir.
3773 ret = did_create_dir(sctx, cur->dir);
3777 ret = send_create_inode(sctx, cur->dir);
3784 * Check if this new ref would overwrite the first ref of
3785 * another unprocessed inode. If yes, orphanize the
3786 * overwritten inode. If we find an overwritten ref that is
3787 * not the first ref, simply unlink it.
3789 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3790 cur->name, cur->name_len,
3791 &ow_inode, &ow_gen);
3795 ret = is_first_ref(sctx->parent_root,
3796 ow_inode, cur->dir, cur->name,
3801 struct name_cache_entry *nce;
3802 struct waiting_dir_move *wdm;
3804 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3810 * If ow_inode has its rename operation delayed
3811 * make sure that its orphanized name is used in
3812 * the source path when performing its rename
3815 if (is_waiting_for_move(sctx, ow_inode)) {
3816 wdm = get_waiting_dir_move(sctx,
3819 wdm->orphanized = true;
3823 * Make sure we clear our orphanized inode's
3824 * name from the name cache. This is because the
3825 * inode ow_inode might be an ancestor of some
3826 * other inode that will be orphanized as well
3827 * later and has an inode number greater than
3828 * sctx->send_progress. We need to prevent
3829 * future name lookups from using the old name
3830 * and get instead the orphan name.
3832 nce = name_cache_search(sctx, ow_inode, ow_gen);
3834 name_cache_delete(sctx, nce);
3839 * ow_inode might currently be an ancestor of
3840 * cur_ino, therefore compute valid_path (the
3841 * current path of cur_ino) again because it
3842 * might contain the pre-orphanization name of
3843 * ow_inode, which is no longer valid.
3845 ret = is_ancestor(sctx->parent_root,
3847 sctx->cur_ino, NULL);
3849 fs_path_reset(valid_path);
3850 ret = get_cur_path(sctx, sctx->cur_ino,
3851 sctx->cur_inode_gen,
3857 ret = send_unlink(sctx, cur->full_path);
3863 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3864 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3873 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
3875 ret = wait_for_parent_move(sctx, cur, is_orphan);
3885 * link/move the ref to the new place. If we have an orphan
3886 * inode, move it and update valid_path. If not, link or move
3887 * it depending on the inode mode.
3889 if (is_orphan && can_rename) {
3890 ret = send_rename(sctx, valid_path, cur->full_path);
3894 ret = fs_path_copy(valid_path, cur->full_path);
3897 } else if (can_rename) {
3898 if (S_ISDIR(sctx->cur_inode_mode)) {
3900 * Dirs can't be linked, so move it. For moved
3901 * dirs, we always have one new and one deleted
3902 * ref. The deleted ref is ignored later.
3904 ret = send_rename(sctx, valid_path,
3907 ret = fs_path_copy(valid_path,
3912 ret = send_link(sctx, cur->full_path,
3918 ret = dup_ref(cur, &check_dirs);
3923 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
3925 * Check if we can already rmdir the directory. If not,
3926 * orphanize it. For every dir item inside that gets deleted
3927 * later, we do this check again and rmdir it then if possible.
3928 * See the use of check_dirs for more details.
3930 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3935 ret = send_rmdir(sctx, valid_path);
3938 } else if (!is_orphan) {
3939 ret = orphanize_inode(sctx, sctx->cur_ino,
3940 sctx->cur_inode_gen, valid_path);
3946 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3947 ret = dup_ref(cur, &check_dirs);
3951 } else if (S_ISDIR(sctx->cur_inode_mode) &&
3952 !list_empty(&sctx->deleted_refs)) {
3954 * We have a moved dir. Add the old parent to check_dirs
3956 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
3958 ret = dup_ref(cur, &check_dirs);
3961 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
3963 * We have a non dir inode. Go through all deleted refs and
3964 * unlink them if they were not already overwritten by other
3967 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3968 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3969 sctx->cur_ino, sctx->cur_inode_gen,
3970 cur->name, cur->name_len);
3974 ret = send_unlink(sctx, cur->full_path);
3978 ret = dup_ref(cur, &check_dirs);
3983 * If the inode is still orphan, unlink the orphan. This may
3984 * happen when a previous inode did overwrite the first ref
3985 * of this inode and no new refs were added for the current
3986 * inode. Unlinking does not mean that the inode is deleted in
3987 * all cases. There may still be links to this inode in other
3991 ret = send_unlink(sctx, valid_path);
3998 * We did collect all parent dirs where cur_inode was once located. We
3999 * now go through all these dirs and check if they are pending for
4000 * deletion and if it's finally possible to perform the rmdir now.
4001 * We also update the inode stats of the parent dirs here.
4003 list_for_each_entry(cur, &check_dirs, list) {
4005 * In case we had refs into dirs that were not processed yet,
4006 * we don't need to do the utime and rmdir logic for these dirs.
4007 * The dir will be processed later.
4009 if (cur->dir > sctx->cur_ino)
4012 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4016 if (ret == inode_state_did_create ||
4017 ret == inode_state_no_change) {
4018 /* TODO delayed utimes */
4019 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4022 } else if (ret == inode_state_did_delete &&
4023 cur->dir != last_dir_ino_rm) {
4024 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4029 ret = get_cur_path(sctx, cur->dir,
4030 cur->dir_gen, valid_path);
4033 ret = send_rmdir(sctx, valid_path);
4036 last_dir_ino_rm = cur->dir;
4044 __free_recorded_refs(&check_dirs);
4045 free_recorded_refs(sctx);
4046 fs_path_free(valid_path);
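/*
 * Build the full path for one ref reported by iterate_inode_ref() and record
 * it in the given list (new or deleted refs) for later processing by
 * process_recorded_refs().
 */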
4050 static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
4051 struct fs_path *name, void *ctx, struct list_head *refs)
4054 struct send_ctx *sctx = ctx;
4058 p = fs_path_alloc();
4062 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4067 ret = get_cur_path(sctx, dir, gen, p);
4070 ret = fs_path_add_path(p, name);
4074 ret = __record_ref(refs, dir, gen, p);
4082 static int __record_new_ref(int num, u64 dir, int index,
4083 struct fs_path *name,
4086 struct send_ctx *sctx = ctx;
4087 return record_ref(sctx->send_root, num, dir, index, name,
4088 ctx, &sctx->new_refs);
4092 static int __record_deleted_ref(int num, u64 dir, int index,
4093 struct fs_path *name,
4096 struct send_ctx *sctx = ctx;
4097 return record_ref(sctx->parent_root, num, dir, index, name,
4098 ctx, &sctx->deleted_refs);
4101 static int record_new_ref(struct send_ctx *sctx)
4105 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4106 sctx->cmp_key, 0, __record_new_ref, sctx);
4115 static int record_deleted_ref(struct send_ctx *sctx)
4119 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4120 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4129 struct find_ref_ctx {
4132 struct btrfs_root *root;
4133 struct fs_path *name;
4137 static int __find_iref(int num, u64 dir, int index,
4138 struct fs_path *name,
4141 struct find_ref_ctx *ctx = ctx_;
4145 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4146 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4148 * To avoid doing extra lookups we'll only do this if everything
4151 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4155 if (dir_gen != ctx->dir_gen)
4157 ctx->found_idx = num;
4163 static int find_iref(struct btrfs_root *root,
4164 struct btrfs_path *path,
4165 struct btrfs_key *key,
4166 u64 dir, u64 dir_gen, struct fs_path *name)
4169 struct find_ref_ctx ctx;
4173 ctx.dir_gen = dir_gen;
4177 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4181 if (ctx.found_idx == -1)
4184 return ctx.found_idx;
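/*
 * For a changed inode, record a ref as new only if the parent root does not
 * already contain the same (dir, name) ref for it; refs present in both
 * trees are filtered out via find_iref().
 */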
4187 static int __record_changed_new_ref(int num, u64 dir, int index,
4188 struct fs_path *name,
4193 struct send_ctx *sctx = ctx;
4195 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4200 ret = find_iref(sctx->parent_root, sctx->right_path,
4201 sctx->cmp_key, dir, dir_gen, name);
4203 ret = __record_new_ref(num, dir, index, name, sctx);
4210 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4211 struct fs_path *name,
4216 struct send_ctx *sctx = ctx;
4218 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4223 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4224 dir, dir_gen, name);
4226 ret = __record_deleted_ref(num, dir, index, name, sctx);
4233 static int record_changed_ref(struct send_ctx *sctx)
4237 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4238 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4241 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4242 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4252 * Record and process all refs at once. Needed when an inode changes the
4253 * generation number, which means that it was deleted and recreated.
4255 static int process_all_refs(struct send_ctx *sctx,
4256 enum btrfs_compare_tree_result cmd)
4259 struct btrfs_root *root;
4260 struct btrfs_path *path;
4261 struct btrfs_key key;
4262 struct btrfs_key found_key;
4263 struct extent_buffer *eb;
4265 iterate_inode_ref_t cb;
4266 int pending_move = 0;
4268 path = alloc_path_for_send();
4272 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4273 root = sctx->send_root;
4274 cb = __record_new_ref;
4275 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4276 root = sctx->parent_root;
4277 cb = __record_deleted_ref;
4279 btrfs_err(sctx->send_root->fs_info,
4280 "Wrong command %d in process_all_refs", cmd);
4285 key.objectid = sctx->cmp_key->objectid;
4286 key.type = BTRFS_INODE_REF_KEY;
4288 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4293 eb = path->nodes[0];
4294 slot = path->slots[0];
4295 if (slot >= btrfs_header_nritems(eb)) {
4296 ret = btrfs_next_leaf(root, path);
4304 btrfs_item_key_to_cpu(eb, &found_key, slot);
4306 if (found_key.objectid != key.objectid ||
4307 (found_key.type != BTRFS_INODE_REF_KEY &&
4308 found_key.type != BTRFS_INODE_EXTREF_KEY))
4311 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4317 btrfs_release_path(path);
4320 * We don't actually care about pending_move as we are simply
4321 * re-creating this inode and will be renaming it into place once we
4322 * rename the parent directory.
4324 ret = process_recorded_refs(sctx, &pending_move);
4326 btrfs_free_path(path);
4330 static int send_set_xattr(struct send_ctx *sctx,
4331 struct fs_path *path,
4332 const char *name, int name_len,
4333 const char *data, int data_len)
4337 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4341 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4342 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4343 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4345 ret = send_cmd(sctx);
4352 static int send_remove_xattr(struct send_ctx *sctx,
4353 struct fs_path *path,
4354 const char *name, int name_len)
4358 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4362 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4363 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4365 ret = send_cmd(sctx);
4372 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4373 const char *name, int name_len,
4374 const char *data, int data_len,
4378 struct send_ctx *sctx = ctx;
4380 struct posix_acl_xattr_header dummy_acl;
4382 p = fs_path_alloc();
4387 * This hack is needed because empty acls are stored as zero byte
4388 * data in xattrs. The problem is that receiving these zero byte
4389 * acls will fail later. To fix this, we send a dummy acl list that
4390 * only contains the version number and no entries.
4392 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4393 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4394 if (data_len == 0) {
4395 dummy_acl.a_version =
4396 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4397 data = (char *)&dummy_acl;
4398 data_len = sizeof(dummy_acl);
4402 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4406 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4413 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4414 const char *name, int name_len,
4415 const char *data, int data_len,
4419 struct send_ctx *sctx = ctx;
4422 p = fs_path_alloc();
4426 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4430 ret = send_remove_xattr(sctx, p, name, name_len);
4437 static int process_new_xattr(struct send_ctx *sctx)
4441 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4442 sctx->cmp_key, __process_new_xattr, sctx);
4447 static int process_deleted_xattr(struct send_ctx *sctx)
4449 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4450 sctx->cmp_key, __process_deleted_xattr, sctx);
4453 struct find_xattr_ctx {
4461 static int __find_xattr(int num, struct btrfs_key *di_key,
4462 const char *name, int name_len,
4463 const char *data, int data_len,
4464 u8 type, void *vctx)
4466 struct find_xattr_ctx *ctx = vctx;
4468 if (name_len == ctx->name_len &&
4469 strncmp(name, ctx->name, name_len) == 0) {
4470 ctx->found_idx = num;
4471 ctx->found_data_len = data_len;
4472 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4473 if (!ctx->found_data)
4480 static int find_xattr(struct btrfs_root *root,
4481 struct btrfs_path *path,
4482 struct btrfs_key *key,
4483 const char *name, int name_len,
4484 char **data, int *data_len)
4487 struct find_xattr_ctx ctx;
4490 ctx.name_len = name_len;
4492 ctx.found_data = NULL;
4493 ctx.found_data_len = 0;
4495 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
4499 if (ctx.found_idx == -1)
4502 *data = ctx.found_data;
4503 *data_len = ctx.found_data_len;
4505 kfree(ctx.found_data);
4507 return ctx.found_idx;
4511 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4512 const char *name, int name_len,
4513 const char *data, int data_len,
4517 struct send_ctx *sctx = ctx;
4518 char *found_data = NULL;
4519 int found_data_len = 0;
4521 ret = find_xattr(sctx->parent_root, sctx->right_path,
4522 sctx->cmp_key, name, name_len, &found_data,
4524 if (ret == -ENOENT) {
4525 ret = __process_new_xattr(num, di_key, name, name_len, data,
4526 data_len, type, ctx);
4527 } else if (ret >= 0) {
4528 if (data_len != found_data_len ||
4529 memcmp(data, found_data, data_len)) {
4530 ret = __process_new_xattr(num, di_key, name, name_len,
4531 data, data_len, type, ctx);
4541 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4542 const char *name, int name_len,
4543 const char *data, int data_len,
4547 struct send_ctx *sctx = ctx;
4549 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4550 name, name_len, NULL, NULL);
4552 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4553 data_len, type, ctx);
4560 static int process_changed_xattr(struct send_ctx *sctx)
4564 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4565 sctx->cmp_key, __process_changed_new_xattr, sctx);
4568 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4569 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
4575 static int process_all_new_xattrs(struct send_ctx *sctx)
4578 struct btrfs_root *root;
4579 struct btrfs_path *path;
4580 struct btrfs_key key;
4581 struct btrfs_key found_key;
4582 struct extent_buffer *eb;
4585 path = alloc_path_for_send();
4589 root = sctx->send_root;
4591 key.objectid = sctx->cmp_key->objectid;
4592 key.type = BTRFS_XATTR_ITEM_KEY;
4594 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4599 eb = path->nodes[0];
4600 slot = path->slots[0];
4601 if (slot >= btrfs_header_nritems(eb)) {
4602 ret = btrfs_next_leaf(root, path);
4605 } else if (ret > 0) {
4612 btrfs_item_key_to_cpu(eb, &found_key, slot);
4613 if (found_key.objectid != key.objectid ||
4614 found_key.type != key.type) {
4619 ret = iterate_dir_item(root, path, &found_key,
4620 __process_new_xattr, sctx);
4628 btrfs_free_path(path);
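/*
 * Read up to 'len' bytes of the current inode starting at 'offset' into
 * sctx->read_buf, going through the page cache of the send root. Returns the
 * number of bytes read (clamped at i_size) or a negative error.
 */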
4632 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4634 struct btrfs_root *root = sctx->send_root;
4635 struct btrfs_fs_info *fs_info = root->fs_info;
4636 struct inode *inode;
4639 struct btrfs_key key;
4640 pgoff_t index = offset >> PAGE_SHIFT;
4642 unsigned pg_offset = offset & ~PAGE_MASK;
4645 key.objectid = sctx->cur_ino;
4646 key.type = BTRFS_INODE_ITEM_KEY;
4649 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4651 return PTR_ERR(inode);
4653 if (offset + len > i_size_read(inode)) {
4654 if (offset > i_size_read(inode))
4657 len = i_size_read(inode) - offset;
4662 last_index = (offset + len - 1) >> PAGE_SHIFT;
4664 /* initial readahead */
4665 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4666 file_ra_state_init(&sctx->ra, inode->i_mapping);
4667 btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
4668 last_index - index + 1);
4670 while (index <= last_index) {
4671 unsigned cur_len = min_t(unsigned, len,
4672 PAGE_SIZE - pg_offset);
4673 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
4679 if (!PageUptodate(page)) {
4680 btrfs_readpage(NULL, page);
4682 if (!PageUptodate(page)) {
4691 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4706 * Read some bytes from the current inode/file and send a write command to
4709 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4711 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4714 ssize_t num_read = 0;
4716 p = fs_path_alloc();
4720 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4722 num_read = fill_read_buf(sctx, offset, len);
4723 if (num_read <= 0) {
4729 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4733 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4737 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4738 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4739 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4741 ret = send_cmd(sctx);
4752 * Send a clone command to user space.
4754 static int send_clone(struct send_ctx *sctx,
4755 u64 offset, u32 len,
4756 struct clone_root *clone_root)
4762 btrfs_debug(sctx->send_root->fs_info,
4763 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4764 offset, len, clone_root->root->objectid, clone_root->ino,
4765 clone_root->offset);
4767 p = fs_path_alloc();
4771 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4775 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4779 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4780 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4781 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4783 if (clone_root->root == sctx->send_root) {
4784 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4785 &gen, NULL, NULL, NULL, NULL);
4788 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4790 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4796 * If the parent we're using has a received_uuid set then use that as
4797 * our clone source as that is what we will look for when doing a
4800 * This covers the case that we create a snapshot off of a received
4801 * subvolume and then use that as the parent and try to receive on a
4804 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4805 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4806 clone_root->root->root_item.received_uuid);
4808 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4809 clone_root->root->root_item.uuid);
4810 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4811 le64_to_cpu(clone_root->root->root_item.ctransid));
4812 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4813 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4814 clone_root->offset);
4816 ret = send_cmd(sctx);
4825 * Send an update extent command to user space.
4827 static int send_update_extent(struct send_ctx *sctx,
4828 u64 offset, u32 len)
4833 p = fs_path_alloc();
4837 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4841 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4845 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4846 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4847 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4849 ret = send_cmd(sctx);
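/*
 * The send stream has no dedicated command to punch a hole, so holes in the
 * range [sctx->cur_inode_last_extent, end) are transmitted as ordinary write
 * commands carrying zeroes.
 */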
4857 static int send_hole(struct send_ctx *sctx, u64 end)
4859 struct fs_path *p = NULL;
4860 u64 offset = sctx->cur_inode_last_extent;
4864 p = fs_path_alloc();
4867 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4869 goto tlv_put_failure;
4870 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4871 while (offset < end) {
4872 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
4874 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4877 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4878 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4879 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
4880 ret = send_cmd(sctx);
4890 static int send_extent_data(struct send_ctx *sctx,
4896 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
4897 return send_update_extent(sctx, offset, len);
4899 while (sent < len) {
4900 u64 size = len - sent;
4903 if (size > BTRFS_SEND_READ_SIZE)
4904 size = BTRFS_SEND_READ_SIZE;
4905 ret = send_write(sctx, offset + sent, size);
4915 static int clone_range(struct send_ctx *sctx,
4916 struct clone_root *clone_root,
4917 const u64 disk_byte,
4922 struct btrfs_path *path;
4923 struct btrfs_key key;
4926 path = alloc_path_for_send();
4931 * We can't send a clone operation for the entire range if we find
4932 * extent items in the respective range in the source file that
4933 * refer to different extents or if we find holes.
4934 * So check for that and do a mix of clone and regular write/copy
4935 * operations if needed.
4939 * mkfs.btrfs -f /dev/sda
4940 * mount /dev/sda /mnt
4941 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
4942 * cp --reflink=always /mnt/foo /mnt/bar
4943 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
4944 * btrfs subvolume snapshot -r /mnt /mnt/snap
4946 * If, when sending the snapshot and processing file bar (which has a
4947 * higher inode number than foo), we blindly send a clone operation
4948 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
4949 * a file bar that matches the content of file foo - iow, doesn't match
4950 * the content from bar in the original filesystem.
4952 key.objectid = clone_root->ino;
4953 key.type = BTRFS_EXTENT_DATA_KEY;
4954 key.offset = clone_root->offset;
4955 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
4958 if (ret > 0 && path->slots[0] > 0) {
4959 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
4960 if (key.objectid == clone_root->ino &&
4961 key.type == BTRFS_EXTENT_DATA_KEY)
4966 struct extent_buffer *leaf = path->nodes[0];
4967 int slot = path->slots[0];
4968 struct btrfs_file_extent_item *ei;
4973 if (slot >= btrfs_header_nritems(leaf)) {
4974 ret = btrfs_next_leaf(clone_root->root, path);
4982 btrfs_item_key_to_cpu(leaf, &key, slot);
4985 * We might have an implicit trailing hole (NO_HOLES feature
4986 * enabled). We deal with it after leaving this loop.
4988 if (key.objectid != clone_root->ino ||
4989 key.type != BTRFS_EXTENT_DATA_KEY)
4992 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4993 type = btrfs_file_extent_type(leaf, ei);
4994 if (type == BTRFS_FILE_EXTENT_INLINE) {
4995 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
4996 ext_len = PAGE_ALIGN(ext_len);
4998 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5001 if (key.offset + ext_len <= clone_root->offset)
5004 if (key.offset > clone_root->offset) {
5005 /* Implicit hole, NO_HOLES feature enabled. */
5006 u64 hole_len = key.offset - clone_root->offset;
5010 ret = send_extent_data(sctx, offset, hole_len);
5018 clone_root->offset += hole_len;
5019 data_offset += hole_len;
5022 if (key.offset >= clone_root->offset + len)
5025 clone_len = min_t(u64, ext_len, len);
5027 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5028 btrfs_file_extent_offset(leaf, ei) == data_offset)
5029 ret = send_clone(sctx, offset, clone_len, clone_root);
5031 ret = send_extent_data(sctx, offset, clone_len);
5039 offset += clone_len;
5040 clone_root->offset += clone_len;
5041 data_offset += clone_len;
5047 ret = send_extent_data(sctx, offset, len);
5051 btrfs_free_path(path);
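/*
 * Send the data of one file extent item, either as a clone operation from
 * the given clone source (when possible and the range is block aligned) or
 * as regular write commands.
 */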
5055 static int send_write_or_clone(struct send_ctx *sctx,
5056 struct btrfs_path *path,
5057 struct btrfs_key *key,
5058 struct clone_root *clone_root)
5061 struct btrfs_file_extent_item *ei;
5062 u64 offset = key->offset;
5065 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5067 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5068 struct btrfs_file_extent_item);
5069 type = btrfs_file_extent_type(path->nodes[0], ei);
5070 if (type == BTRFS_FILE_EXTENT_INLINE) {
5071 len = btrfs_file_extent_inline_len(path->nodes[0],
5072 path->slots[0], ei);
5074 * it is possible the inline item won't cover the whole page,
5075 * but there may be items after this page. Make
5076 * sure to send the whole thing
5078 len = PAGE_ALIGN(len);
5080 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5083 if (offset + len > sctx->cur_inode_size)
5084 len = sctx->cur_inode_size - offset;
5090 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5094 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5095 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5096 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5099 ret = send_extent_data(sctx, offset, len);
5105 static int is_extent_unchanged(struct send_ctx *sctx,
5106 struct btrfs_path *left_path,
5107 struct btrfs_key *ekey)
5110 struct btrfs_key key;
5111 struct btrfs_path *path = NULL;
5112 struct extent_buffer *eb;
5114 struct btrfs_key found_key;
5115 struct btrfs_file_extent_item *ei;
5120 u64 left_offset_fixed;
5128 path = alloc_path_for_send();
5132 eb = left_path->nodes[0];
5133 slot = left_path->slots[0];
5134 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5135 left_type = btrfs_file_extent_type(eb, ei);
5137 if (left_type != BTRFS_FILE_EXTENT_REG) {
5141 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5142 left_len = btrfs_file_extent_num_bytes(eb, ei);
5143 left_offset = btrfs_file_extent_offset(eb, ei);
5144 left_gen = btrfs_file_extent_generation(eb, ei);
5147 * Following comments will refer to these graphics. L is the left
5148 * extents which we are checking at the moment. 1-8 are the right
5149 * extents that we iterate.
5152 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5155 * |--1--|-2b-|...(same as above)
5157 * Alternative situation. Happens on files where extents got split.
5159 * |-----------7-----------|-6-|
5161 * Alternative situation. Happens on files which got larger.
5164 * Nothing follows after 8.
5167 key.objectid = ekey->objectid;
5168 key.type = BTRFS_EXTENT_DATA_KEY;
5169 key.offset = ekey->offset;
5170 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5179 * Handle special case where the right side has no extents at all.
5181 eb = path->nodes[0];
5182 slot = path->slots[0];
5183 btrfs_item_key_to_cpu(eb, &found_key, slot);
5184 if (found_key.objectid != key.objectid ||
5185 found_key.type != key.type) {
5186 /* If we're a hole then just pretend nothing changed */
5187 ret = (left_disknr) ? 0 : 1;
5192 * We're now on 2a, 2b or 7.
5195 while (key.offset < ekey->offset + left_len) {
5196 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5197 right_type = btrfs_file_extent_type(eb, ei);
5198 if (right_type != BTRFS_FILE_EXTENT_REG &&
5199 right_type != BTRFS_FILE_EXTENT_INLINE) {
5204 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5205 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5206 right_len = btrfs_file_extent_inline_len(eb, slot, ei);
5207 right_len = PAGE_ALIGN(right_len);
5209 right_len = btrfs_file_extent_num_bytes(eb, ei);
5211 right_offset = btrfs_file_extent_offset(eb, ei);
5212 right_gen = btrfs_file_extent_generation(eb, ei);
5215 * Are we at extent 8? If yes, we know the extent is changed.
5216 * This may only happen on the first iteration.
5218 if (found_key.offset + right_len <= ekey->offset) {
5219 /* If we're a hole just pretend nothing changed */
5220 ret = (left_disknr) ? 0 : 1;
5225 * We only wanted to see, when we have an inline extent, whether what
5226 * follows it is a regular extent (i.e. to check the above condition
5227 * for inline extents too). This should normally not
5228 * happen but it's possible for example when we have an inline
5229 * compressed extent representing data with a size matching
5230 * the page size (currently the same as sector size).
5232 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5237 left_offset_fixed = left_offset;
5238 if (key.offset < ekey->offset) {
5239 /* Fix the right offset for 2a and 7. */
5240 right_offset += ekey->offset - key.offset;
5242 /* Fix the left offset for all behind 2a and 2b */
5243 left_offset_fixed += key.offset - ekey->offset;
5247 * Check if we have the same extent.
5249 if (left_disknr != right_disknr ||
5250 left_offset_fixed != right_offset ||
5251 left_gen != right_gen) {
5257 * Go to the next extent.
5259 ret = btrfs_next_item(sctx->parent_root, path);
5263 eb = path->nodes[0];
5264 slot = path->slots[0];
5265 btrfs_item_key_to_cpu(eb, &found_key, slot);
5267 if (ret || found_key.objectid != key.objectid ||
5268 found_key.type != key.type) {
5269 key.offset += right_len;
5272 if (found_key.offset != key.offset + right_len) {
5280 * We're now behind the left extent (treat as unchanged) or at the end
5281 * of the right side (treat as changed).
5283 if (key.offset >= ekey->offset + left_len)
5290 btrfs_free_path(path);
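/*
 * Look up the file extent item of the current inode that starts at or before
 * 'offset' in the send root and remember where it ends in
 * sctx->cur_inode_last_extent. maybe_send_hole() uses this to detect gaps
 * between consecutive extent items.
 */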
5294 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5296 struct btrfs_path *path;
5297 struct btrfs_root *root = sctx->send_root;
5298 struct btrfs_file_extent_item *fi;
5299 struct btrfs_key key;
5304 path = alloc_path_for_send();
5308 sctx->cur_inode_last_extent = 0;
5310 key.objectid = sctx->cur_ino;
5311 key.type = BTRFS_EXTENT_DATA_KEY;
5312 key.offset = offset;
5313 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5317 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5318 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5321 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5322 struct btrfs_file_extent_item);
5323 type = btrfs_file_extent_type(path->nodes[0], fi);
5324 if (type == BTRFS_FILE_EXTENT_INLINE) {
5325 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5326 path->slots[0], fi);
5327 extent_end = ALIGN(key.offset + size,
5328 sctx->send_root->fs_info->sectorsize);
5330 extent_end = key.offset +
5331 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5333 sctx->cur_inode_last_extent = extent_end;
5335 btrfs_free_path(path);
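/*
 * Check whether the file range [start, end) is entirely a hole in the parent
 * snapshot, i.e. not covered by any extent item with a real disk extent. If
 * it is, the hole does not need to be re-sent during an incremental send.
 */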
5339 static int range_is_hole_in_parent(struct send_ctx *sctx,
5343 struct btrfs_path *path;
5344 struct btrfs_key key;
5345 struct btrfs_root *root = sctx->parent_root;
5346 u64 search_start = start;
5349 path = alloc_path_for_send();
5353 key.objectid = sctx->cur_ino;
5354 key.type = BTRFS_EXTENT_DATA_KEY;
5355 key.offset = search_start;
5356 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5359 if (ret > 0 && path->slots[0] > 0)
5362 while (search_start < end) {
5363 struct extent_buffer *leaf = path->nodes[0];
5364 int slot = path->slots[0];
5365 struct btrfs_file_extent_item *fi;
5368 if (slot >= btrfs_header_nritems(leaf)) {
5369 ret = btrfs_next_leaf(root, path);
5377 btrfs_item_key_to_cpu(leaf, &key, slot);
5378 if (key.objectid < sctx->cur_ino ||
5379 key.type < BTRFS_EXTENT_DATA_KEY)
5381 if (key.objectid > sctx->cur_ino ||
5382 key.type > BTRFS_EXTENT_DATA_KEY ||
5386 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5387 if (btrfs_file_extent_type(leaf, fi) ==
5388 BTRFS_FILE_EXTENT_INLINE) {
5389 u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
5391 extent_end = ALIGN(key.offset + size,
5392 root->fs_info->sectorsize);
5394 extent_end = key.offset +
5395 btrfs_file_extent_num_bytes(leaf, fi);
5397 if (extent_end <= start)
5399 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5400 search_start = extent_end;
5410 btrfs_free_path(path);
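/*
 * Called for each file extent item of the current inode. If there is a gap
 * between the end of the previous extent (sctx->cur_inode_last_extent) and
 * this item's offset, and that gap is not already a hole in the parent
 * snapshot, send zeroes to punch the hole on the receiving side.
 */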
5414 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5415 struct btrfs_key *key)
5417 struct btrfs_file_extent_item *fi;
5422 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5425 if (sctx->cur_inode_last_extent == (u64)-1) {
5426 ret = get_last_extent(sctx, key->offset - 1);
5431 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5432 struct btrfs_file_extent_item);
5433 type = btrfs_file_extent_type(path->nodes[0], fi);
5434 if (type == BTRFS_FILE_EXTENT_INLINE) {
5435 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5436 path->slots[0], fi);
5437 extent_end = ALIGN(key->offset + size,
5438 sctx->send_root->fs_info->sectorsize);
5440 extent_end = key->offset +
5441 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5444 if (path->slots[0] == 0 &&
5445 sctx->cur_inode_last_extent < key->offset) {
5447 * We might have skipped entire leafs that contained only
5448 * file extent items for our current inode. These leafs have
5449 * a generation number smaller (older) than the one in the
5450 * current leaf and the leaf our last extent came from, and
5451 * are located between these 2 leafs.
5453 ret = get_last_extent(sctx, key->offset - 1);
5458 if (sctx->cur_inode_last_extent < key->offset) {
5459 ret = range_is_hole_in_parent(sctx,
5460 sctx->cur_inode_last_extent,
5465 ret = send_hole(sctx, key->offset);
5469 sctx->cur_inode_last_extent = extent_end;
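/*
 * Handle one file extent item of the current inode: skip its data if it is
 * unchanged compared to the parent snapshot, otherwise look for a clone
 * source and send the data as clone or write commands. In both cases check
 * whether a hole preceding this extent has to be transmitted as well.
 */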
5473 static int process_extent(struct send_ctx *sctx,
5474 struct btrfs_path *path,
5475 struct btrfs_key *key)
5477 struct clone_root *found_clone = NULL;
5480 if (S_ISLNK(sctx->cur_inode_mode))
5483 if (sctx->parent_root && !sctx->cur_inode_new) {
5484 ret = is_extent_unchanged(sctx, path, key);
5492 struct btrfs_file_extent_item *ei;
5495 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5496 struct btrfs_file_extent_item);
5497 type = btrfs_file_extent_type(path->nodes[0], ei);
5498 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5499 type == BTRFS_FILE_EXTENT_REG) {
5501 * The send spec does not have a prealloc command yet,
5502 * so just leave a hole for prealloc'ed extents until
5503 * we have enough commands queued up to justify rev'ing
5506 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5511 /* Have a hole, just skip it. */
5512 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5519 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5520 sctx->cur_inode_size, &found_clone);
5521 if (ret != -ENOENT && ret < 0)
5524 ret = send_write_or_clone(sctx, path, key, found_clone);
5528 ret = maybe_send_hole(sctx, path, key);
5533 static int process_all_extents(struct send_ctx *sctx)
5536 struct btrfs_root *root;
5537 struct btrfs_path *path;
5538 struct btrfs_key key;
5539 struct btrfs_key found_key;
5540 struct extent_buffer *eb;
5543 root = sctx->send_root;
5544 path = alloc_path_for_send();
5548 key.objectid = sctx->cmp_key->objectid;
5549 key.type = BTRFS_EXTENT_DATA_KEY;
5551 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5556 eb = path->nodes[0];
5557 slot = path->slots[0];
5559 if (slot >= btrfs_header_nritems(eb)) {
5560 ret = btrfs_next_leaf(root, path);
5563 } else if (ret > 0) {
5570 btrfs_item_key_to_cpu(eb, &found_key, slot);
5572 if (found_key.objectid != key.objectid ||
5573 found_key.type != key.type) {
5578 ret = process_extent(sctx, path, &found_key);
5586 btrfs_free_path(path);
5590 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5592 int *refs_processed)
5596 if (sctx->cur_ino == 0)
5598 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5599 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5601 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5604 ret = process_recorded_refs(sctx, pending_move);
5608 *refs_processed = 1;
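/*
 * Called once all items of the current inode were seen (or at the end of the
 * stream). Processes any still pending refs, sends any needed truncate,
 * chown, chmod and utimes commands and replays directory moves that were
 * waiting for this inode.
 */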
5613 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5624 int pending_move = 0;
5625 int refs_processed = 0;
5627 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5633 * We have processed the refs and thus need to advance send_progress.
5634 * Now, calls to get_cur_xxx will take the updated refs of the current
5635 * inode into account.
5637 * On the other hand, if our current inode is a directory and couldn't
5638 * be moved/renamed because its parent was renamed/moved too and it has
5639 * a higher inode number, we can only move/rename our current inode
5640 * after we moved/renamed its parent. Therefore in this case operate on
5641 * the old path (pre move/rename) of our current inode, and the
5642 * move/rename will be performed later.
5644 if (refs_processed && !pending_move)
5645 sctx->send_progress = sctx->cur_ino + 1;
5647 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5649 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5652 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5653 &left_mode, &left_uid, &left_gid, NULL);
5657 if (!sctx->parent_root || sctx->cur_inode_new) {
5659 if (!S_ISLNK(sctx->cur_inode_mode))
5662 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5663 NULL, NULL, &right_mode, &right_uid,
5668 if (left_uid != right_uid || left_gid != right_gid)
5670 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5674 if (S_ISREG(sctx->cur_inode_mode)) {
5675 if (need_send_hole(sctx)) {
5676 if (sctx->cur_inode_last_extent == (u64)-1 ||
5677 sctx->cur_inode_last_extent <
5678 sctx->cur_inode_size) {
5679 ret = get_last_extent(sctx, (u64)-1);
5683 if (sctx->cur_inode_last_extent <
5684 sctx->cur_inode_size) {
5685 ret = send_hole(sctx, sctx->cur_inode_size);
5690 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5691 sctx->cur_inode_size);
5697 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5698 left_uid, left_gid);
5703 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5710 * If other directory inodes depended on our current directory
5711 * inode's move/rename, now do their move/rename operations.
5713 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5714 ret = apply_children_dir_moves(sctx);
5718 * Need to send that every time, no matter if it actually
5719 * changed between the two trees as we have done changes to
5720 * the inode before. If our inode is a directory and it's
5721 * waiting to be moved/renamed, we will send its utimes when
5722 * it's moved/renamed, therefore we don't need to do it here.
5724 sctx->send_progress = sctx->cur_ino + 1;
5725 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
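/*
 * Called by the tree comparison code for every changed inode item. Sets up
 * the cur_inode_* state of the send context and, depending on whether the
 * inode is new, deleted or got its inode number reused (new generation),
 * creates or deletes it in the send stream.
 */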
5734 static int changed_inode(struct send_ctx *sctx,
5735 enum btrfs_compare_tree_result result)
5738 struct btrfs_key *key = sctx->cmp_key;
5739 struct btrfs_inode_item *left_ii = NULL;
5740 struct btrfs_inode_item *right_ii = NULL;
5744 sctx->cur_ino = key->objectid;
5745 sctx->cur_inode_new_gen = 0;
5746 sctx->cur_inode_last_extent = (u64)-1;
5749 * Set send_progress to current inode. This will tell all get_cur_xxx
5750 * functions that the current inode's refs are not updated yet. Later,
5751 * when process_recorded_refs is finished, it is set to cur_ino + 1.
5753 sctx->send_progress = sctx->cur_ino;
5755 if (result == BTRFS_COMPARE_TREE_NEW ||
5756 result == BTRFS_COMPARE_TREE_CHANGED) {
5757 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
5758 sctx->left_path->slots[0],
5759 struct btrfs_inode_item);
5760 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
5763 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5764 sctx->right_path->slots[0],
5765 struct btrfs_inode_item);
5766 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5769 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5770 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5771 sctx->right_path->slots[0],
5772 struct btrfs_inode_item);
5774 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5778 * The cur_ino = root dir case is special here. We can't treat
5779 * the inode as deleted+reused because it would generate a
5780 * stream that tries to delete/mkdir the root dir.
5782 if (left_gen != right_gen &&
5783 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5784 sctx->cur_inode_new_gen = 1;
5787 if (result == BTRFS_COMPARE_TREE_NEW) {
5788 sctx->cur_inode_gen = left_gen;
5789 sctx->cur_inode_new = 1;
5790 sctx->cur_inode_deleted = 0;
5791 sctx->cur_inode_size = btrfs_inode_size(
5792 sctx->left_path->nodes[0], left_ii);
5793 sctx->cur_inode_mode = btrfs_inode_mode(
5794 sctx->left_path->nodes[0], left_ii);
5795 sctx->cur_inode_rdev = btrfs_inode_rdev(
5796 sctx->left_path->nodes[0], left_ii);
5797 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5798 ret = send_create_inode_if_needed(sctx);
5799 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5800 sctx->cur_inode_gen = right_gen;
5801 sctx->cur_inode_new = 0;
5802 sctx->cur_inode_deleted = 1;
5803 sctx->cur_inode_size = btrfs_inode_size(
5804 sctx->right_path->nodes[0], right_ii);
5805 sctx->cur_inode_mode = btrfs_inode_mode(
5806 sctx->right_path->nodes[0], right_ii);
5807 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5809 * We need to do some special handling in case the inode was
5810 * reported as changed with a changed generation number. This
5811 * means that the original inode was deleted and a new inode
5812 * reused the same inum. So we have to treat the old inode as
5813 * deleted and the new one as new.
5815 if (sctx->cur_inode_new_gen) {
5817 * First, process the inode as if it was deleted.
5819 sctx->cur_inode_gen = right_gen;
5820 sctx->cur_inode_new = 0;
5821 sctx->cur_inode_deleted = 1;
5822 sctx->cur_inode_size = btrfs_inode_size(
5823 sctx->right_path->nodes[0], right_ii);
5824 sctx->cur_inode_mode = btrfs_inode_mode(
5825 sctx->right_path->nodes[0], right_ii);
5826 ret = process_all_refs(sctx,
5827 BTRFS_COMPARE_TREE_DELETED);
5832 * Now process the inode as if it was new.
5834 sctx->cur_inode_gen = left_gen;
5835 sctx->cur_inode_new = 1;
5836 sctx->cur_inode_deleted = 0;
5837 sctx->cur_inode_size = btrfs_inode_size(
5838 sctx->left_path->nodes[0], left_ii);
5839 sctx->cur_inode_mode = btrfs_inode_mode(
5840 sctx->left_path->nodes[0], left_ii);
5841 sctx->cur_inode_rdev = btrfs_inode_rdev(
5842 sctx->left_path->nodes[0], left_ii);
5843 ret = send_create_inode_if_needed(sctx);
5847 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
5851 * Advance send_progress now as we did not get into
5852 * process_recorded_refs_if_needed in the new_gen case.
5854 sctx->send_progress = sctx->cur_ino + 1;
5857 * Now process all extents and xattrs of the inode as if
5858 * they were all new.
5860 ret = process_all_extents(sctx);
5863 ret = process_all_new_xattrs(sctx);
5867 sctx->cur_inode_gen = left_gen;
5868 sctx->cur_inode_new = 0;
5869 sctx->cur_inode_new_gen = 0;
5870 sctx->cur_inode_deleted = 0;
5871 sctx->cur_inode_size = btrfs_inode_size(
5872 sctx->left_path->nodes[0], left_ii);
5873 sctx->cur_inode_mode = btrfs_inode_mode(
5874 sctx->left_path->nodes[0], left_ii);
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode has already initiated processing of the refs. The reason for
 * this is that in this case, compare_tree actually compares the refs of two
 * different inodes. To fix this, process_all_refs is used in changed_inode to
 * handle all refs of the right tree as deleted and all refs of the left tree
 * as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of the xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of the extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {

		if (result == BTRFS_COMPARE_TREE_CHANGED) {
			struct extent_buffer *leaf_l;
			struct extent_buffer *leaf_r;
			struct btrfs_file_extent_item *ei_l;
			struct btrfs_file_extent_item *ei_r;

			leaf_l = sctx->left_path->nodes[0];
			leaf_r = sctx->right_path->nodes[0];
			ei_l = btrfs_item_ptr(leaf_l,
					      sctx->left_path->slots[0],
					      struct btrfs_file_extent_item);
			ei_r = btrfs_item_ptr(leaf_r,
					      sctx->right_path->slots[0],
					      struct btrfs_file_extent_item);

			/*
			 * We may have found an extent item that has changed
			 * only its disk_bytenr field and the corresponding
			 * inode item was not updated. This case happens due to
			 * very specific timings during relocation when a leaf
			 * that contains file extent items is COWed while
			 * relocation is ongoing and it's in the stage where it
			 * updates data pointers. So when this happens we can
			 * safely ignore it since we know it's the same extent,
			 * but just at different logical and physical locations
			 * (when an extent is fully replaced with a new one, we
			 * know the generation number must have changed too,
			 * since snapshot creation implies committing the current
			 * transaction, and the inode item must have been updated
			 * too).
			 * This replacement of the disk_bytenr happens at
			 * relocation.c:replace_file_extents() through
			 * relocation.c:btrfs_reloc_cow_block().
			 */
			if (btrfs_file_extent_generation(leaf_l, ei_l) ==
			    btrfs_file_extent_generation(leaf_r, ei_r) &&
			    btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_compression(leaf_l, ei_l) ==
			    btrfs_file_extent_compression(leaf_r, ei_r) &&
			    btrfs_file_extent_encryption(leaf_l, ei_l) ==
			    btrfs_file_extent_encryption(leaf_r, ei_r) &&
			    btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
			    btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
			    btrfs_file_extent_type(leaf_l, ei_l) ==
			    btrfs_file_extent_type(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
			    btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_offset(leaf_l, ei_l) ==
			    btrfs_file_extent_offset(leaf_r, ei_r) &&
			    btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_num_bytes(leaf_r, ei_r))
				return 0;
		}

		inconsistent_snapshot_error(sctx, result, "extent");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
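/*
 * Check if the generation of a directory inode differs between the send root
 * and the parent root, i.e. whether the directory was deleted and its inode
 * number reused between the two snapshots. Returns 1 if the generation
 * changed, 0 if not, and a negative errno from the inode lookup on failure.
 */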
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}
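/*
 * For an inode ref/extref item that compare_trees reported as identical in
 * both trees, check whether any of the parent directories it points to had
 * its generation changed. If so, the ref still has to be reprocessed even
 * though the item bytes are the same.
 */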
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_root *left_root,
		      struct btrfs_root *right_root,
		      struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}
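/*
 * Full send (no parent snapshot): iterate over all items of the send root and
 * feed each of them to changed_cb() as BTRFS_COMPARE_TREE_NEW, so the whole
 * subvolume is emitted as if every inode, ref, xattr and extent was new.
 */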
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		ret = changed_cb(send_root, NULL, path, NULL,
				&found_key, BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}
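/*
 * Top level of the stream generation: emit the stream header and the
 * subvol/snapshot command, then either diff the send root against the parent
 * root (incremental send) or walk the whole send root (full send).
 */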
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}
/*
 * If orphan cleanup removed any orphans from a root, it means the tree was
 * modified and therefore the commit root is not the same as the current root
 * anymore. This is a problem, because send uses the commit root and therefore
 * can see inode items that don't exist in the current root anymore, and for
 * example make calls to btrfs_iget, which will do tree lookups based on the
 * current root and not on the commit root. Those lookups will fail, returning
 * a -ESTALE error, and making send fail with that error. So make sure a send
 * does not see any orphans we have just removed, and that it will see the
 * same inodes regardless of whether a transaction commit happened before it
 * started (meaning that the commit root will be the same as the current root)
 * or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}
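/*
 * Drop one send_in_progress reference from a root. If the counter would go
 * negative we only warn, since we don't know which increment went missing and
 * can't safely reset it to 0.
 */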
static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
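/*
 * Entry point for the BTRFS_IOC_SEND ioctl. A rough sketch of how userspace
 * (e.g. btrfs-progs' "btrfs send") typically drives this, assuming subvol_fd
 * is an open fd of the read-only subvolume to send and pipe_fd is where the
 * stream should be written (variable names here are only illustrative):
 *
 *	struct btrfs_ioctl_send_args args = {0};
 *
 *	args.send_fd = pipe_fd;              // stream is written to this fd
 *	args.parent_root = parent_root_id;   // 0 for a full (non-incremental) send
 *	args.clone_sources = clone_ids;      // optional array of subvolume ids
 *	args.clone_sources_count = n_clones;
 *	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 *
 * The subvolume, the parent and all clone sources must stay read-only for the
 * duration of the send; this function takes send_in_progress references on
 * them to enforce that.
 */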
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct btrfs_ioctl_send_args *arg = NULL;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * Orphan cleanup is done when we look up the root; it should already
	 * be complete by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if the subvolume is
	 * not read-only.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	arg = memdup_user(arg_, sizeof(*arg));
	if (IS_ERR(arg)) {
		ret = PTR_ERR(arg);
		arg = NULL;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible: if the subvolume is marked for deletion but
	 * removal of its directory entry is slow, a send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kfree(arg);
	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);
		kvfree(sctx->read_buf);

		name_cache_free(sctx);