// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/fileattr.h>
#include <linux/fsverity.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
#include "sysfs.h"
#include "qgroup.h"
#include "tree-log.h"
#include "compression.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "subpage.h"
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif
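/*
 * Illustrative userspace sketch (hypothetical, not built with the kernel):
 * without the packed layout above, the embedded timespecs get natural
 * 4-byte alignment on a 32-bit build, so sizeof() of the args struct, and
 * therefore the _IOWR() ioctl number derived from it, differs from what a
 * 64-bit kernel computes:
 *
 *	#include <stdio.h>
 *	#include <linux/btrfs.h>
 *
 *	int main(void)
 *	{
 *		printf("%zu\n",
 *		       sizeof(struct btrfs_ioctl_received_subvol_args));
 *		return 0;	// differs between i386 and x86_64 builds
 *	}
 */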
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u32 version;			/* in */
	__u8  reserved[28];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
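/*
 * Hedged sketch of how a compat handler would widen this struct into the
 * native btrfs_ioctl_send_args (local names are illustrative, not the
 * exact upstream code); compat_ptr() is the one non-obvious step, turning
 * the 32-bit clone_sources value into a native user pointer:
 *
 *	struct btrfs_ioctl_send_args_32 args32;
 *	struct btrfs_ioctl_send_args args = { 0 };
 *
 *	if (copy_from_user(&args32, argp, sizeof(args32)))
 *		return -EFAULT;
 *	args.send_fd = args32.send_fd;
 *	args.clone_sources_count = args32.clone_sources_count;
 *	args.clone_sources = compat_ptr(args32.clone_sources);
 */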
/* Mask out flags that are inappropriate for the given type of inode. */
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
{
	if (S_ISDIR(inode->i_mode))
		return flags;
	else if (S_ISREG(inode->i_mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
 */
static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
{
	unsigned int iflags = 0;
	u32 flags = binode->flags;
	u32 ro_flags = binode->ro_flags;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;
	if (ro_flags & BTRFS_INODE_RO_VERITY)
		iflags |= FS_VERITY_FL;

	if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;

	return iflags;
}
/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
{
	struct btrfs_inode *binode = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (binode->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (binode->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (binode->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (binode->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;
	if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
		new_fl |= S_VERITY;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
		      S_VERITY, new_fl);
}
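/*
 * For reference: set_mask_bits() atomically performs
 *
 *	i_flags = (i_flags & ~MASK) | new_fl;
 *
 * so inode flags outside the listed mask are left untouched.
 */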
/*
 * Check if @flags are a supported and valid set of FS_*_FL flags and that
 * the old and new flags are not conflicting
 */
static int check_fsflags(unsigned int old_flags, unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	/* COMPR and NOCOMP on new/old are valid */
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
		return -EINVAL;

	/* NOCOW and compression options are mutually exclusive */
	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;
	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;

	return 0;
}
static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
				    unsigned int flags)
{
	if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
		return -EPERM;

	return 0;
}
/*
 * Set flags/xflags from the internal inode flags. The remaining items of
 * fsxattr are zeroed.
 */
int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));

	fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
	return 0;
}
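/*
 * Userspace sketch (hypothetical) of the consumer side: the fileattr flags
 * filled in above are what FS_IOC_GETFLAGS (used by lsattr(1)) reports:
 *
 *	int attr = 0;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	if (attr & FS_NOCOW_FL)
 *		printf("nodatacow\n");
 */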
int btrfs_fileattr_set(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	unsigned int fsflags, old_fsflags;
	int ret;
	const char *comp = NULL;
	u32 binode_flags;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode);
	ret = check_fsflags(old_fsflags, fsflags);
	if (ret)
		return ret;

	ret = check_fsflags_compatible(fs_info, fsflags);
	if (ret)
		return ret;

	binode_flags = binode->flags;
	if (fsflags & FS_SYNC_FL)
		binode_flags |= BTRFS_INODE_SYNC;
	else
		binode_flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode_flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode_flags |= BTRFS_INODE_APPEND;
	else
		binode_flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode_flags |= BTRFS_INODE_NODUMP;
	else
		binode_flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode_flags |= BTRFS_INODE_NOATIME;
	else
		binode_flags &= ~BTRFS_INODE_NOATIME;

	/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
	if (!fa->flags_valid) {
		/* 1 item for the inode */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto update_flags;
	}

	if (fsflags & FS_DIRSYNC_FL)
		binode_flags |= BTRFS_INODE_DIRSYNC;
	else
		binode_flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
		if (S_ISREG(inode->i_mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				binode_flags |= BTRFS_INODE_NODATACOW |
						BTRFS_INODE_NODATASUM;
		} else {
			binode_flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(inode->i_mode)) {
			if (inode->i_size == 0)
				binode_flags &= ~(BTRFS_INODE_NODATACOW |
						  BTRFS_INODE_NODATASUM);
		} else {
			binode_flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (fsflags & FS_NOCOMP_FL) {
		binode_flags &= ~BTRFS_INODE_COMPRESS;
		binode_flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (fsflags & FS_COMPR_FL) {

		if (IS_SWAPFILE(inode))
			return -ETXTBSY;

		binode_flags |= BTRFS_INODE_COMPRESS;
		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;

		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
	} else {
		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	/*
	 * 1 for inode item
	 * 2 for properties
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (comp) {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
				     strlen(comp), 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	} else {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
				     0, 0);
		if (ret && ret != -ENODATA) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

update_flags:
	binode->flags = binode_flags;
	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

out_end_trans:
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * Start exclusive operation @type, return true on success
 */
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type)
{
	bool ret = false;

	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
		fs_info->exclusive_operation = type;
		ret = true;
	}
	spin_unlock(&fs_info->super_lock);

	return ret;
}
/*
 * Conditionally allow to enter the exclusive operation in case it's compatible
 * with the running one.  This must be paired with btrfs_exclop_start_unlock
 * and btrfs_exclop_finish.
 *
 * Compatibility:
 * - the same type is already running
 * - when trying to add a device and balance has been paused
 * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
 *   must check the condition first that would allow none -> @type
 */
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type)
{
	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == type ||
	    (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
	     type == BTRFS_EXCLOP_DEV_ADD))
		return true;

	spin_unlock(&fs_info->super_lock);
	return false;
}
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
{
	spin_unlock(&fs_info->super_lock);
}
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->super_lock);
	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
	spin_unlock(&fs_info->super_lock);
	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
}
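/*
 * Typical pairing of the helpers above (illustrative sketch only; the
 * exclop type and the hypothetical do_work() stand in for a real operation
 * such as resize or device add):
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 *	ret = do_work(fs_info);
 *	btrfs_exclop_finish(fs_info);
 */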
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op)
{
	switch (op) {
	case BTRFS_EXCLOP_BALANCE_PAUSED:
		spin_lock(&fs_info->super_lock);
		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
		spin_unlock(&fs_info->super_lock);
		break;
	case BTRFS_EXCLOP_BALANCE:
		spin_lock(&fs_info->super_lock);
		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
		spin_unlock(&fs_info->super_lock);
		break;
	default:
		btrfs_warn(fs_info,
			   "invalid exclop balance operation %d requested", op);
	}
}
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}
static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * btrfs_trim_block_group() depends on space cache, which is not
	 * available in zoned filesystem. So, disallow fitrim on a zoned
	 * filesystem for now.
	 */
	if (btrfs_is_zoned(fs_info))
		return -EOPNOTSUPP;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64, q->limits.discard_granularity,
				       minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
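/*
 * Userspace sketch (hypothetical, not part of this file) of what a caller
 * such as fstrim(8) passes down to the handler above; FITRIM and struct
 * fstrim_range come from <linux/fs.h>:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 */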
int __pure btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}
static noinline int create_subvol(struct user_namespace *mnt_userns,
				  struct inode *dir, struct dentry *dentry,
				  const char *name, int namelen,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec64 cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	dev_t anon_dev = 0;
	u64 objectid;
	u64 index = 0;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	ret = get_anon_bdev(&anon_dev);
	if (ret < 0)
		goto fail_free;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
	if (ret)
		goto fail_free;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv);
		goto fail_free;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
	generate_random_guid(root_item->uuid);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);

	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
				root_item);
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree with a backreference for a root that does not
		 * exist).
		 */
		btrfs_tree_lock(leaf);
		btrfs_clean_tree_block(leaf);
		btrfs_tree_unlock(leaf);
		btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
		free_extent_buffer(leaf);
		goto fail;
	}

	free_extent_buffer(leaf);
	leaf = NULL;

	key.offset = (u64)-1;
	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
	if (IS_ERR(new_root)) {
		free_anon_bdev(anon_dev);
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* Freeing will be done in btrfs_put_root() of new_root */
	anon_dev = 0;

	ret = btrfs_record_root_in_trans(trans, new_root);
	if (ret) {
		btrfs_put_root(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns);
	btrfs_put_root(new_root);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret)
		btrfs_abort_transaction(trans, ret);

fail:
	kfree(root_item);
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv);

	if (ret)
		btrfs_end_transaction(trans);
	else
		ret = btrfs_commit_transaction(trans);

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;

fail_free:
	if (anon_dev)
		free_anon_bdev(anon_dev);
	kfree(root_item);
	return ret;
}
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return -EINVAL;

	if (atomic_read(&root->nr_swapfiles)) {
		btrfs_warn(fs_info,
			   "cannot snapshot subvolume with active swapfile");
		return -ETXTBSY;
	}

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	ret = get_anon_bdev(&pending_snapshot->anon_dev);
	if (ret < 0)
		goto free_pending;
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					false);
	if (ret)
		goto free_pending;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	trans->pending_snapshot = pending_snapshot;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
	pending_snapshot->anon_dev = 0;
fail:
	/* Prevent double freeing of anon_dev */
	if (ret && pending_snapshot->snap)
		pending_snapshot->snap->anon_dev = 0;
	btrfs_put_root(pending_snapshot->snap);
	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
free_pending:
	if (pending_snapshot->anon_dev)
		free_anon_bdev(pending_snapshot->anon_dev);
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}
/* Copy of may_delete() in fs/namei.c
 * Check whether we can remove a link victim from directory dir, check
 * whether the type of victim is right.
 * 1. We can't do it if dir is read-only (done in permission())
 * 2. We should have write and exec permissions on dir
 * 3. We can't remove anything from append-only dir
 * 4. We can't do anything with immutable dir (done in permission())
 * 5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 * 6. If the victim is append-only or immutable we can't do anything with
 *    links pointing to it.
 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 * 9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int btrfs_may_delete(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
	    IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
	    IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
/* Copy of may_create() in fs/namei.c */
static inline int btrfs_may_create(struct user_namespace *mnt_userns,
				   struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
		return -EOVERFLOW;
	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
}
/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(mnt_userns, dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * Even if this name doesn't exist, we may get hash collisions.
	 * Check for them now when we can safely fail.
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name, namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src)
		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
	else
		error = create_subvol(mnt_userns, dir, dentry, name, namelen, inherit);

	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	btrfs_inode_unlock(dir, 0);
	return error;
}
static noinline int btrfs_mksnapshot(const struct path *parent,
				     struct user_namespace *mnt_userns,
				     const char *name, int namelen,
				     struct btrfs_root *root,
				     bool readonly,
				     struct btrfs_qgroup_inherit *inherit)
{
	int ret;
	bool snapshot_force_cow = false;

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running delalloc)
	 * falling back to COW mode and unexpectedly failing with ENOSPC.
	 */
	btrfs_drew_read_lock(&root->snapshot_lock);

	ret = btrfs_start_delalloc_snapshot(root, false);
	if (ret)
		goto out;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
			     root, readonly, inherit);
out:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	btrfs_drew_read_unlock(&root->snapshot_lock);
	return ret;
}
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       bool locked)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;

	/*
	 * Hopefully we have this extent in the tree already, try without the
	 * full extent lock.
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, sectorsize);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + sectorsize - 1;

		/* Get the big lock and read metadata off disk. */
		if (!locked)
			lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
		if (!locked)
			unlock_extent_cached(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     bool locked)
{
	struct extent_map *next;
	bool ret = true;

	/* This is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len, locked);
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
		ret = false;

	free_extent_map(next);
	return ret;
}
/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
					    pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct page *page;
	int ret;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/*
	 * Since we can defragment files opened read-only, we can encounter
	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
	 * can't do I/O using huge pages yet, so return an error for now.
	 * Filesystem transparent huge pages are typically only used for
	 * executables that explicitly enable them, so this isn't very
	 * restrictive.
	 */
	if (PageCompound(page)) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(-ETXTBSY);
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		if (!ordered)
			break;

		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		lock_page(page);
		/*
		 * We unlocked the page above, so we need check if it was
		 * released or not.
		 */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more.  Read the page to
	 * make it uptodate.
	 */
	if (!PageUptodate(page)) {
		btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
struct defrag_target_range {
	struct list_head list;
	u64 start;
	u64 len;
};
/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   if the range has already held extent lock
 * @target_list:   list of targets file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list)
{
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;
		u64 range_len;

		em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
		if (!em)
			break;

		/* Skip hole/inline/preallocated extents */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			goto next;

		/* Skip older extent */
		if (em->generation < newer_than)
			goto next;

		/*
		 * Our start offset might be in the middle of an existing extent
		 * map, so take that into account.
		 */
		range_len = em->len - (cur - em->start);
		/*
		 * If this range of the extent map is already flagged for
		 * delalloc, skip it, because:
		 *
		 * 1) We could deadlock later, when trying to reserve space for
		 *    delalloc, because in case we can't immediately reserve space
		 *    the flusher can start delalloc and wait for the respective
		 *    ordered extents to complete. The deadlock would happen
		 *    because we do the space reservation while holding the range
		 *    locked, and starting writeback, or finishing an ordered
		 *    extent, requires locking the range;
		 *
		 * 2) If there's delalloc there, it means there's dirty pages for
		 *    which writeback has not started yet (we clean the delalloc
		 *    flag when starting writeback and after creating an ordered
		 *    extent). If we mark pages in an adjacent range for defrag,
		 *    then we will have a larger contiguous range for delalloc,
		 *    very likely resulting in a larger extent after writeback is
		 *    triggered (except in a case of free space fragmentation).
		 */
		if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
				   EXTENT_DELALLOC, 0, NULL))
			goto next;

		/*
		 * For do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */
		if (do_compress)
			goto add;

		/* Skip too large extent */
		if (range_len >= extent_thresh)
			goto next;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
							  locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		/* Allocate new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		new->start = cur;
		new->len = range_len;
		list_add_tail(&new->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	return ret;
}
#define CLUSTER_SIZE	(SZ_256K)
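/*
 * For reference, simple arithmetic assuming 4K pages: a cluster spans
 * CLUSTER_SIZE / PAGE_SIZE == SZ_256K / SZ_4K == 64 pages, which bounds
 * nr_pages in defrag_one_range() below.
 */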
/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct page **pages, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = page_index(pages[0]);
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, 0, 0, cached_state);
	set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		ClearPageChecked(pages[i]);
		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
	}
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct page **pages;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;
	int ret = 0;
	int i;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = defrag_prepare_one_page(inode, start_index + i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto free_pages;
		}
	}
	for (i = 0; i < nr_pages; i++)
		wait_on_page_writeback(pages[i]);

	/* Lock the pages range */
	lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
			 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			 &cached_state);
	/*
	 * Now we have a consistent view about the extent map, re-check
	 * which range really needs to be defragged.
	 *
	 * And this time we have extent locked already, pass @locked = true
	 * so that we won't relock the extent range and cause deadlock.
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list);
	if (ret < 0)
		goto unlock_extent;

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
					       &cached_state);
		if (ret < 0)
			break;
	}

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
unlock_extent:
	unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
			     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			     &cached_state);
free_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}
	kfree(pages);
	return ret;
}
static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached or beyond the limit */
		if (max_sectors && *sectors_defragged >= max_sectors) {
			ret = 1;
			break;
		}

		if (max_sectors)
			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		if (ra)
			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);
		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress);
		if (ret < 0)
			break;
		*sectors_defragged += range_len >>
				      inode->root->fs_info->sectorsize_bits;
	}
out:
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
	return ret;
}
/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NULL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 *
 * Return <0 for error.
 * Return >=0 for the number of sectors defragged, and range->start will be updated
 * to indicate the file offset where next defrag should be started at.
 * (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
 *  defragging all the range).
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long sectors_defragged = 0;
	u64 isize = i_size_read(inode);
	u64 cur;
	u64 last_byte;
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
	bool ra_allocated = false;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int ret = 0;
	u32 extent_thresh = range->extent_thresh;
	pgoff_t start_index;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (do_compress) {
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len);
	} else {
		/* Defrag until file end */
		last_byte = isize;
	}

	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;

	/*
	 * If we were not given a ra, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
	if (!ra) {
		ra_allocated = true;
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	}

	/*
	 * Make writeback start from the beginning of the range, so that the
	 * defrag range can be written sequentially.
	 */
	start_index = cur >> PAGE_SHIFT;
	if (start_index < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = start_index;

	while (cur < last_byte) {
		const unsigned long prev_sectors_defragged = sectors_defragged;
		u64 cluster_end;

		/* The cluster size 256K should always be page aligned */
		BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));

		if (btrfs_defrag_cancelled(fs_info)) {
			ret = -EAGAIN;
			break;
		}

		/* We want the cluster end at page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);

		btrfs_inode_lock(inode, 0);
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
			btrfs_inode_unlock(inode, 0);
			break;
		}
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
			btrfs_inode_unlock(inode, 0);
			break;
		}
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				cluster_end + 1 - cur, extent_thresh,
				newer_than, do_compress,
				&sectors_defragged, max_to_defrag);

		if (sectors_defragged > prev_sectors_defragged)
			balance_dirty_pages_ratelimited(inode->i_mapping);

		btrfs_inode_unlock(inode, 0);
		if (ret < 0)
			break;
		cur = cluster_end + 1;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	if (ra_allocated)
		kfree(ra);
	/*
	 * Update range.start for autodefrag, this will indicate where to start
	 * in next run.
	 */
	range->start = cur;
	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for compression case they
		 * need to be written back immediately.
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
			filemap_flush(inode->i_mapping);
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;
	}
	if (do_compress) {
		btrfs_inode_lock(inode, 0);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		btrfs_inode_unlock(inode, 0);
	}
	return ret;
}
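/*
 * Userspace sketch (hypothetical) of how this entry point is typically
 * reached via BTRFS_IOC_DEFRAG_RANGE from <linux/btrfs.h>; the values
 * mirror the in-kernel defaults above:
 *
 *	struct btrfs_ioctl_defrag_range_args args = {
 *		.start = 0,
 *		.len = (__u64)-1,		// whole file
 *		.extent_thresh = 256 * 1024,	// same as the SZ_256K default
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args);
 */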
/*
 * Try to start exclusive operation @type or cancel it if it's running.
 *
 * Return:
 *   0        - normal mode, newly claimed op started
 *   >0       - normal mode, something else is running,
 *              return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
 *   ECANCELED - cancel mode, successful cancel
 *   ENOTCONN  - cancel mode, operation not running anymore
 */
static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type, bool cancel)
{
	if (!cancel) {
		/* Start normal op */
		if (!btrfs_exclop_start(fs_info, type))
			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		/* Exclusive operation is now claimed */
		return 0;
	}

	/* Cancel running op */
	if (btrfs_exclop_start_try_lock(fs_info, type)) {
		/*
		 * This blocks any exclop finish from setting it to NONE, so we
		 * request cancellation. Either it runs and we will wait for it,
		 * or it has finished and no waiting will happen.
		 */
		atomic_inc(&fs_info->reloc_cancel_req);
		btrfs_exclop_start_unlock(fs_info);

		if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
			wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
				    TASK_INTERRUPTIBLE);

		return -ECANCELED;
	}

	/* Something else is running or none */
	return -ENOTCONN;
}
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;
	bool cancel;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	/*
	 * Read the arguments before checking exclusivity to be able to
	 * distinguish regular resize and cancel
	 */
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out_drop;
	}
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	sizestr = vol_args->name;
	cancel = (strcmp("cancel", sizestr) == 0);
	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
	if (ret)
		goto out_free;
	/* Exclusive operation is now claimed */

	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_finish;
		if (!devid) {
			ret = -EINVAL;
			goto out_finish;
		}
		btrfs_info(fs_info, "resizing devid %llu", devid);
	}

	args.devid = devid;
	device = btrfs_find_device(fs_info->fs_devices, &args);
	if (!device) {
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_finish;
	}

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_info(fs_info,
			   "resizer unable to apply on readonly device %llu",
		       devid);
		ret = -EPERM;
		goto out_finish;
	}

	if (!strcmp(sizestr, "max")) {
		new_size = bdev_nr_bytes(device->bdev);
	} else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_finish;
		}
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -EPERM;
		goto out_finish;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_finish;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_finish;
		}
		new_size = old_size + new_size;
	}

	if (new_size < SZ_256M) {
		ret = -EINVAL;
		goto out_finish;
	}
	if (new_size > bdev_nr_bytes(device->bdev)) {
		ret = -EFBIG;
		goto out_finish;
	}

	new_size = round_down(new_size, fs_info->sectorsize);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_finish;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing need to do */

	if (ret == 0 && new_size != old_size)
		btrfs_info_in_rcu(fs_info,
			"resize device %s (devid %llu) from %llu to %llu",
			rcu_str_deref(device->name), device->devid,
			old_size, new_size);
out_finish:
	btrfs_exclop_finish(fs_info);
out_free:
	kfree(vol_args);
out_drop:
	mnt_drop_write_file(file);
	return ret;
}
static noinline int __btrfs_ioctl_snap_create(struct file *file,
				struct user_namespace *mnt_userns,
				const char *name, unsigned long fd, int subvol,
				bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
				     namelen, NULL, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;

		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
					       name, namelen,
					       BTRFS_I(src_inode)->root,
					       readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					false, NULL);

	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		u64 nums;

		if (vol_args->size < sizeof(*inherit) ||
		    vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}

		if (inherit->num_qgroups > PAGE_SIZE ||
		    inherit->num_ref_copies > PAGE_SIZE ||
		    inherit->num_excl_copies > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_inherit;
		}

		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
			ret = -EINVAL;
			goto free_inherit;
		}
	}

	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					readonly, inherit);
	if (ret)
		goto free_inherit;
free_args:
	kfree(vol_args);
	return ret;

free_inherit:
	kfree(inherit);
	goto free_args;
}
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
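/*
 * Userspace sketch (hypothetical): flipping a subvolume read-only via the
 * ioctl pair above, roughly what 'btrfs property set <path> ro true' does;
 * fd must refer to the subvolume's root directory:
 *
 *	__u64 flags;
 *
 *	ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags);
 *	flags |= BTRFS_SUBVOL_RDONLY;
 *	ioctl(fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags);
 */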
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}
static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * Return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */
			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/*
		 * Copy search result header. If we fault then loop again so we
		 * can fault in the pages and -EFAULT there if there's a
		 * problem. Otherwise we'll fault and then copy the buffer in
		 * properly this next time through
		 */
		if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = 0;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/*
			 * Copy the item, same behavior as above, but reset
			 * *sk_offset so we copy the full thing again.
			 */
			if (read_extent_buffer_to_user_nofault(leaf, up,
						item_off, item_len)) {
				ret = 0;
				*sk_offset -= sizeof(sh);
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: more items can be copied, but unused buffer is too small,
	 *     or all items were found.
	 *     Either way, it stops the loop which iterates to the next
	 *     leaf.
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* Search the root of the inode that was passed */
		root = btrfs_grab_root(BTRFS_I(inode)->root);
	} else {
		root = btrfs_get_fs_root(info, sk->tree_id, true);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return PTR_ERR(root);
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = -EFAULT;
		if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
			break;

		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;
	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_put_root(root);
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
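/*
 * Userspace sketch (hypothetical) of BTRFS_IOC_TREE_SEARCH: dump items of
 * the root tree that match the [min, max] key range.  Requires
 * CAP_SYS_ADMIN, as enforced above:
 *
 *	struct btrfs_ioctl_search_args args = {
 *		.key = {
 *			.tree_id = BTRFS_ROOT_TREE_OBJECTID,
 *			.max_objectid = (__u64)-1,
 *			.max_type = (__u8)-1,
 *			.max_offset = (__u64)-1,
 *			.max_transid = (__u64)-1,
 *			.nr_items = 4096,
 *		},
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);
 *	// args.key.nr_items now holds the number of copied items
 */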
/*
 * Search INODE_REFs to identify the path name of the 'dirid' directory
 * in a 'tree_id' tree, and set the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0] = '\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

	root = btrfs_get_fs_root(info, tree_id, true);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		root = NULL;
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_backwards(root, &key, path);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = -ENOENT;
			goto out;
		}

		l = path->nodes[0];
		slot = path->slots[0];

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_put_root(root);
	btrfs_free_path(path);
	return ret;
}
2493 static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
2494 struct inode *inode,
2495 struct btrfs_ioctl_ino_lookup_user_args *args)
2497 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2498 struct super_block *sb = inode->i_sb;
2499 struct btrfs_key upper_limit = BTRFS_I(inode)->location;
2500 u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
2501 u64 dirid = args->dirid;
2502 unsigned long item_off;
2503 unsigned long item_len;
2504 struct btrfs_inode_ref *iref;
2505 struct btrfs_root_ref *rref;
2506 struct btrfs_root *root = NULL;
2507 struct btrfs_path *path;
2508 struct btrfs_key key, key2;
2509 struct extent_buffer *leaf;
2510 struct inode *temp_inode;
2517 path = btrfs_alloc_path();
2522 * If the bottom subvolume does not exist directly under upper_limit,
2523	 * construct the path from the bottom up.
2525 if (dirid != upper_limit.objectid) {
2526 ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
2528 root = btrfs_get_fs_root(fs_info, treeid, true);
2530 ret = PTR_ERR(root);
2534 key.objectid = dirid;
2535 key.type = BTRFS_INODE_REF_KEY;
2536 key.offset = (u64)-1;
2538 ret = btrfs_search_backwards(root, &key, path);
2546 leaf = path->nodes[0];
2547 slot = path->slots[0];
2549 iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
2550 len = btrfs_inode_ref_name_len(leaf, iref);
2552 total_len += len + 1;
2553 if (ptr < args->path) {
2554 ret = -ENAMETOOLONG;
2559 read_extent_buffer(leaf, ptr,
2560 (unsigned long)(iref + 1), len);
2562 /* Check the read+exec permission of this directory */
2563 ret = btrfs_previous_item(root, path, dirid,
2564 BTRFS_INODE_ITEM_KEY);
2567 } else if (ret > 0) {
2572 leaf = path->nodes[0];
2573 slot = path->slots[0];
2574 btrfs_item_key_to_cpu(leaf, &key2, slot);
2575 if (key2.objectid != dirid) {
2580 temp_inode = btrfs_iget(sb, key2.objectid, root);
2581 if (IS_ERR(temp_inode)) {
2582 ret = PTR_ERR(temp_inode);
2585 ret = inode_permission(mnt_userns, temp_inode,
2586 MAY_READ | MAY_EXEC);
2593 if (key.offset == upper_limit.objectid)
2595 if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
2600 btrfs_release_path(path);
2601 key.objectid = key.offset;
2602 key.offset = (u64)-1;
2603 dirid = key.objectid;
2606 memmove(args->path, ptr, total_len);
2607 args->path[total_len] = '\0';
2608 btrfs_put_root(root);
2610 btrfs_release_path(path);
2613 /* Get the bottom subvolume's name from ROOT_REF */
2614 key.objectid = treeid;
2615 key.type = BTRFS_ROOT_REF_KEY;
2616 key.offset = args->treeid;
2617 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2620 } else if (ret > 0) {
2625 leaf = path->nodes[0];
2626 slot = path->slots[0];
2627 btrfs_item_key_to_cpu(leaf, &key, slot);
2629 item_off = btrfs_item_ptr_offset(leaf, slot);
2630 item_len = btrfs_item_size(leaf, slot);
2631 /* Check if dirid in ROOT_REF corresponds to passed dirid */
2632 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2633 if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
2638 /* Copy subvolume's name */
2639 item_off += sizeof(struct btrfs_root_ref);
2640 item_len -= sizeof(struct btrfs_root_ref);
2641 read_extent_buffer(leaf, args->name, item_off, item_len);
2642 args->name[item_len] = 0;
2645 btrfs_put_root(root);
2647 btrfs_free_path(path);
2651 static noinline int btrfs_ioctl_ino_lookup(struct file *file,
2654 struct btrfs_ioctl_ino_lookup_args *args;
2655 struct inode *inode;
2658 args = memdup_user(argp, sizeof(*args));
2660 return PTR_ERR(args);
2662 inode = file_inode(file);
2665 * Unprivileged query to obtain the containing subvolume root id. The
2666 * path is reset so it's consistent with btrfs_search_path_in_tree.
2668 if (args->treeid == 0)
2669 args->treeid = BTRFS_I(inode)->root->root_key.objectid;
2671 if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
2676 if (!capable(CAP_SYS_ADMIN)) {
2681 ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
2682 args->treeid, args->objectid,
2686 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
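/*
 * Illustrative userspace sketch (example values, not from this file):
 *
 *	struct btrfs_ioctl_ino_lookup_args args = {
 *		.treeid = 0,				// 0: subvolume of fd
 *		.objectid = BTRFS_FIRST_FREE_OBJECTID,	// subvol root dir
 *	};
 *	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) == 0)
 *		;	// args.treeid holds the containing subvolume id
 *
 * Per the checks above, this particular combination is the one query that
 * does not require CAP_SYS_ADMIN.
 */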
2694 * Version of ino_lookup ioctl (unprivileged)
2696 * The main differences from ino_lookup ioctl are:
2698 * 1. Read + Exec permission will be checked using inode_permission() during
2699 * path construction. -EACCES will be returned in case of failure.
2700 * 2. Path construction will be stopped at the inode number which corresponds
2701 *    to the fd with which this ioctl is called. If the constructed path
2702 *    does not exist under the fd's inode, -EACCES will be returned.
2703 * 3. The name of the bottom subvolume is also looked up and filled in.
2705 static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
2707 struct btrfs_ioctl_ino_lookup_user_args *args;
2708 struct inode *inode;
2711 args = memdup_user(argp, sizeof(*args));
2713 return PTR_ERR(args);
2715 inode = file_inode(file);
2717 if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
2718 BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
2720	 * The subvolume does not exist under the fd with which this is called.
2727 ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);
2729 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2736 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
2737 static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
2739 struct btrfs_ioctl_get_subvol_info_args *subvol_info;
2740 struct btrfs_fs_info *fs_info;
2741 struct btrfs_root *root;
2742 struct btrfs_path *path;
2743 struct btrfs_key key;
2744 struct btrfs_root_item *root_item;
2745 struct btrfs_root_ref *rref;
2746 struct extent_buffer *leaf;
2747 unsigned long item_off;
2748 unsigned long item_len;
2749 struct inode *inode;
2753 path = btrfs_alloc_path();
2757 subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
2759 btrfs_free_path(path);
2763 inode = file_inode(file);
2764 fs_info = BTRFS_I(inode)->root->fs_info;
2766 /* Get root_item of inode's subvolume */
2767 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
2768 root = btrfs_get_fs_root(fs_info, key.objectid, true);
2770 ret = PTR_ERR(root);
2773 root_item = &root->root_item;
2775 subvol_info->treeid = key.objectid;
2777 subvol_info->generation = btrfs_root_generation(root_item);
2778 subvol_info->flags = btrfs_root_flags(root_item);
2780 memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
2781 memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
2783 memcpy(subvol_info->received_uuid, root_item->received_uuid,
2786 subvol_info->ctransid = btrfs_root_ctransid(root_item);
2787 subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
2788 subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);
2790 subvol_info->otransid = btrfs_root_otransid(root_item);
2791 subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
2792 subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);
2794 subvol_info->stransid = btrfs_root_stransid(root_item);
2795 subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
2796 subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);
2798 subvol_info->rtransid = btrfs_root_rtransid(root_item);
2799 subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
2800 subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);
2802 if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
2803 /* Search root tree for ROOT_BACKREF of this subvolume */
2804 key.type = BTRFS_ROOT_BACKREF_KEY;
2806 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2809 } else if (path->slots[0] >=
2810 btrfs_header_nritems(path->nodes[0])) {
2811 ret = btrfs_next_leaf(fs_info->tree_root, path);
2814 } else if (ret > 0) {
2820 leaf = path->nodes[0];
2821 slot = path->slots[0];
2822 btrfs_item_key_to_cpu(leaf, &key, slot);
2823 if (key.objectid == subvol_info->treeid &&
2824 key.type == BTRFS_ROOT_BACKREF_KEY) {
2825 subvol_info->parent_id = key.offset;
2827 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2828 subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);
2830 item_off = btrfs_item_ptr_offset(leaf, slot)
2831 + sizeof(struct btrfs_root_ref);
2832 item_len = btrfs_item_size(leaf, slot)
2833 - sizeof(struct btrfs_root_ref);
2834 read_extent_buffer(leaf, subvol_info->name,
2835 item_off, item_len);
2842 if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
2846 btrfs_put_root(root);
2848 btrfs_free_path(path);
2854 * Return ROOT_REF information of the subvolume containing this inode
2855 * except the subvolume name.
2857 static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
2859 struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
2860 struct btrfs_root_ref *rref;
2861 struct btrfs_root *root;
2862 struct btrfs_path *path;
2863 struct btrfs_key key;
2864 struct extent_buffer *leaf;
2865 struct inode *inode;
2871 path = btrfs_alloc_path();
2875 rootrefs = memdup_user(argp, sizeof(*rootrefs));
2876 if (IS_ERR(rootrefs)) {
2877 btrfs_free_path(path);
2878 return PTR_ERR(rootrefs);
2881 inode = file_inode(file);
2882 root = BTRFS_I(inode)->root->fs_info->tree_root;
2883 objectid = BTRFS_I(inode)->root->root_key.objectid;
2885 key.objectid = objectid;
2886 key.type = BTRFS_ROOT_REF_KEY;
2887 key.offset = rootrefs->min_treeid;
2890 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2893 } else if (path->slots[0] >=
2894 btrfs_header_nritems(path->nodes[0])) {
2895 ret = btrfs_next_leaf(root, path);
2898 } else if (ret > 0) {
2904 leaf = path->nodes[0];
2905 slot = path->slots[0];
2907 btrfs_item_key_to_cpu(leaf, &key, slot);
2908 if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
2913 if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
2918 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2919 rootrefs->rootref[found].treeid = key.offset;
2920 rootrefs->rootref[found].dirid =
2921 btrfs_root_ref_dirid(leaf, rref);
2924 ret = btrfs_next_item(root, path);
2927 } else if (ret > 0) {
2934 if (!ret || ret == -EOVERFLOW) {
2935 rootrefs->num_items = found;
2936 /* update min_treeid for next search */
2938 rootrefs->min_treeid =
2939 rootrefs->rootref[found - 1].treeid + 1;
2940 if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
2945 btrfs_free_path(path);
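/*
 * The min_treeid update above makes this ioctl restartable: when the
 * rootref[] array fills up (BTRFS_MAX_ROOTREF_BUFFER_NUM entries) the call
 * ends with -EOVERFLOW but still copies out the partial batch, so a caller
 * can keep reissuing the ioctl with the same args until it returns without
 * -EOVERFLOW.
 */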
2950 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2954 struct dentry *parent = file->f_path.dentry;
2955 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2956 struct dentry *dentry;
2957 struct inode *dir = d_inode(parent);
2958 struct inode *inode;
2959 struct btrfs_root *root = BTRFS_I(dir)->root;
2960 struct btrfs_root *dest = NULL;
2961 struct btrfs_ioctl_vol_args *vol_args = NULL;
2962 struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
2963 struct user_namespace *mnt_userns = file_mnt_user_ns(file);
2964 char *subvol_name, *subvol_name_ptr = NULL;
2967 bool destroy_parent = false;
2970 vol_args2 = memdup_user(arg, sizeof(*vol_args2));
2971 if (IS_ERR(vol_args2))
2972 return PTR_ERR(vol_args2);
2974 if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
2980 * If SPEC_BY_ID is not set, we are looking for the subvolume by
2981 * name, same as v1 currently does.
2983 if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
2984 vol_args2->name[BTRFS_SUBVOL_NAME_MAX] = 0;
2985 subvol_name = vol_args2->name;
2987 err = mnt_want_write_file(file);
2991 struct inode *old_dir;
2993 if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
2998 err = mnt_want_write_file(file);
3002 dentry = btrfs_get_dentry(fs_info->sb,
3003 BTRFS_FIRST_FREE_OBJECTID,
3004 vol_args2->subvolid, 0, 0);
3005 if (IS_ERR(dentry)) {
3006 err = PTR_ERR(dentry);
3007 goto out_drop_write;
3011 * Change the default parent since the subvolume being
3012 * deleted can be outside of the current mount point.
3014 parent = btrfs_get_parent(dentry);
3017 * At this point dentry->d_name can point to '/' if the
3018	 * subvolume we want to destroy is outside of the
3019 * current mount point, so we need to release the
3020 * current dentry and execute the lookup to return a new
3021 * one with ->d_name pointing to the
3022 * <mount point>/subvol_name.
3025 if (IS_ERR(parent)) {
3026 err = PTR_ERR(parent);
3027 goto out_drop_write;
3030 dir = d_inode(parent);
3033 * If v2 was used with SPEC_BY_ID, a new parent was
3034 * allocated since the subvolume can be outside of the
3035 * current mount point. Later on we need to release this
3036 * new parent dentry.
3038 destroy_parent = true;
3041 * On idmapped mounts, deletion via subvolid is
3042 * restricted to subvolumes that are immediate
3043 * ancestors of the inode referenced by the file
3044 * descriptor in the ioctl. Otherwise the idmapping
3045 * could potentially be abused to delete subvolumes
3046 * anywhere in the filesystem the user wouldn't be able
3047 * to delete without an idmapped mount.
3049 if (old_dir != dir && mnt_userns != &init_user_ns) {
3054 subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
3055 fs_info, vol_args2->subvolid);
3056 if (IS_ERR(subvol_name_ptr)) {
3057 err = PTR_ERR(subvol_name_ptr);
3060 /* subvol_name_ptr is already nul terminated */
3061 subvol_name = (char *)kbasename(subvol_name_ptr);
3064 vol_args = memdup_user(arg, sizeof(*vol_args));
3065 if (IS_ERR(vol_args))
3066 return PTR_ERR(vol_args);
3068 vol_args->name[BTRFS_PATH_NAME_MAX] = 0;
3069 subvol_name = vol_args->name;
3071 err = mnt_want_write_file(file);
3076 subvol_namelen = strlen(subvol_name);
3078 if (strchr(subvol_name, '/') ||
3079 strncmp(subvol_name, "..", subvol_namelen) == 0) {
3081 goto free_subvol_name;
3084 if (!S_ISDIR(dir->i_mode)) {
3086 goto free_subvol_name;
3089 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
3091 goto free_subvol_name;
3092 dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
3093 if (IS_ERR(dentry)) {
3094 err = PTR_ERR(dentry);
3095 goto out_unlock_dir;
3098 if (d_really_is_negative(dentry)) {
3103 inode = d_inode(dentry);
3104 dest = BTRFS_I(inode)->root;
3105 if (!capable(CAP_SYS_ADMIN)) {
3107 * Regular user. Only allow this with a special mount
3108 * option, when the user has write+exec access to the
3109	 * subvol root, and when rmdir(2) would have been allowed.
3112	 * Note that this is _not_ a check that the subvol is
3113 * empty or doesn't contain data that we wouldn't
3114 * otherwise be able to delete.
3116	 * Users who want to delete empty subvols should try rmdir(2).
3120 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
3124 * Do not allow deletion if the parent dir is the same
3125 * as the dir to be deleted. That means the ioctl
3126 * must be called on the dentry referencing the root
3127	 * of the subvol, not a random directory contained inside it.
3134 err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
3139 /* check if subvolume may be deleted by a user */
3140 err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
3144 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
3149 btrfs_inode_lock(inode, 0);
3150 err = btrfs_delete_subvolume(dir, dentry);
3151 btrfs_inode_unlock(inode, 0);
3153 d_delete_notify(dir, dentry);
3158 btrfs_inode_unlock(dir, 0);
3160 kfree(subvol_name_ptr);
3165 mnt_drop_write_file(file);
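/*
 * Illustrative userspace sketch of the v2 SPEC_BY_ID branch above
 * (example id, not from this file):
 *
 *	struct btrfs_ioctl_vol_args_v2 args = {0};
 *	args.flags = BTRFS_SUBVOL_SPEC_BY_ID;
 *	args.subvolid = 256;
 *	ioctl(fd, BTRFS_IOC_SNAP_DESTROY_V2, &args);
 *
 * Without SPEC_BY_ID the subvolume is addressed by args.name instead,
 * matching the v1 behaviour.
 */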
3172 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
3174 struct inode *inode = file_inode(file);
3175 struct btrfs_root *root = BTRFS_I(inode)->root;
3176 struct btrfs_ioctl_defrag_range_args range = {0};
3179 ret = mnt_want_write_file(file);
3183 if (btrfs_root_readonly(root)) {
3188 switch (inode->i_mode & S_IFMT) {
3190 if (!capable(CAP_SYS_ADMIN)) {
3194 ret = btrfs_defrag_root(root);
3198 * Note that this does not check the file descriptor for write
3199 * access. This prevents defragmenting executables that are
3200 * running and allows defrag on files open in read-only mode.
3202 if (!capable(CAP_SYS_ADMIN) &&
3203 inode_permission(&init_user_ns, inode, MAY_WRITE)) {
3209 if (copy_from_user(&range, argp, sizeof(range))) {
3213 /* compression requires us to start the IO */
3214 if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
3215 range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
3216 range.extent_thresh = (u32)-1;
3219	 /* The remaining fields are zeroed by the {0} initializer above. */
3220 range.len = (u64)-1;
3222 ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
3223 &range, BTRFS_OLDEST_GENERATION, 0);
3231 mnt_drop_write_file(file);
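/*
 * Illustrative userspace sketch (example values): a whole-file compressing
 * defrag through BTRFS_IOC_DEFRAG_RANGE:
 *
 *	struct btrfs_ioctl_defrag_range_args range = {0};
 *	range.len = (__u64)-1;		// whole file
 *	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
 *	range.compress_type = 1;	// assumed to be zlib in the enum
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 *
 * As enforced above, COMPRESS forces START_IO and a maximal extent_thresh
 * on the kernel side.
 */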
3235 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
3237 struct btrfs_ioctl_vol_args *vol_args;
3238 bool restore_op = false;
3241 if (!capable(CAP_SYS_ADMIN))
3244 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
3245 if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
3246 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3249	 * We can do the device add because we have a paused balance:
3250	 * change the exclusive op type and remember we should bring
3251	 * back the paused balance.
3253 fs_info->exclusive_operation = BTRFS_EXCLOP_DEV_ADD;
3254 btrfs_exclop_start_unlock(fs_info);
3258 vol_args = memdup_user(arg, sizeof(*vol_args));
3259 if (IS_ERR(vol_args)) {
3260 ret = PTR_ERR(vol_args);
3264 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3265 ret = btrfs_init_new_device(fs_info, vol_args->name);
3268 btrfs_info(fs_info, "disk added %s", vol_args->name);
3273 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
3275 btrfs_exclop_finish(fs_info);
3279 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
3281 BTRFS_DEV_LOOKUP_ARGS(args);
3282 struct inode *inode = file_inode(file);
3283 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3284 struct btrfs_ioctl_vol_args_v2 *vol_args;
3285 struct block_device *bdev = NULL;
3288 bool cancel = false;
3290 if (!capable(CAP_SYS_ADMIN))
3293 vol_args = memdup_user(arg, sizeof(*vol_args));
3294 if (IS_ERR(vol_args))
3295 return PTR_ERR(vol_args);
3297 if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
3302 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
3303 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
3304 args.devid = vol_args->devid;
3305 } else if (!strcmp("cancel", vol_args->name)) {
3308 ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name);
3313 ret = mnt_want_write_file(file);
3317 ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
3322 /* Exclusive operation is now claimed */
3323 ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
3325 btrfs_exclop_finish(fs_info);
3328 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3329 btrfs_info(fs_info, "device deleted: id %llu",
3332 btrfs_info(fs_info, "device deleted: %s",
3336 mnt_drop_write_file(file);
3338 blkdev_put(bdev, mode);
3340 btrfs_put_dev_args_from_path(&args);
3345 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
3347 BTRFS_DEV_LOOKUP_ARGS(args);
3348 struct inode *inode = file_inode(file);
3349 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3350 struct btrfs_ioctl_vol_args *vol_args;
3351 struct block_device *bdev = NULL;
3354 bool cancel = false;
3356 if (!capable(CAP_SYS_ADMIN))
3359 vol_args = memdup_user(arg, sizeof(*vol_args));
3360 if (IS_ERR(vol_args))
3361 return PTR_ERR(vol_args);
3363 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3364 if (!strcmp("cancel", vol_args->name)) {
3367 ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name);
3372 ret = mnt_want_write_file(file);
3376 ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
3379 ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
3381 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
3382 btrfs_exclop_finish(fs_info);
3385 mnt_drop_write_file(file);
3387 blkdev_put(bdev, mode);
3389 btrfs_put_dev_args_from_path(&args);
3394 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
3397 struct btrfs_ioctl_fs_info_args *fi_args;
3398 struct btrfs_device *device;
3399 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3403 fi_args = memdup_user(arg, sizeof(*fi_args));
3404 if (IS_ERR(fi_args))
3405 return PTR_ERR(fi_args);
3407 flags_in = fi_args->flags;
3408 memset(fi_args, 0, sizeof(*fi_args));
3411 fi_args->num_devices = fs_devices->num_devices;
3413 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3414 if (device->devid > fi_args->max_id)
3415 fi_args->max_id = device->devid;
3419 memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
3420 fi_args->nodesize = fs_info->nodesize;
3421 fi_args->sectorsize = fs_info->sectorsize;
3422 fi_args->clone_alignment = fs_info->sectorsize;
3424 if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) {
3425 fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy);
3426 fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy);
3427 fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO;
3430 if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
3431 fi_args->generation = fs_info->generation;
3432 fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
3435 if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) {
3436 memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid,
3437 sizeof(fi_args->metadata_uuid));
3438 fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID;
3441 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
3448 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
3451 BTRFS_DEV_LOOKUP_ARGS(args);
3452 struct btrfs_ioctl_dev_info_args *di_args;
3453 struct btrfs_device *dev;
3456 di_args = memdup_user(arg, sizeof(*di_args));
3457 if (IS_ERR(di_args))
3458 return PTR_ERR(di_args);
3460 args.devid = di_args->devid;
3461 if (!btrfs_is_empty_uuid(di_args->uuid))
3462 args.uuid = di_args->uuid;
3465 dev = btrfs_find_device(fs_info->fs_devices, &args);
3471 di_args->devid = dev->devid;
3472 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
3473 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
3474 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3476 strncpy(di_args->path, rcu_str_deref(dev->name),
3477 sizeof(di_args->path) - 1);
3478 di_args->path[sizeof(di_args->path) - 1] = 0;
3480 di_args->path[0] = '\0';
3485 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
3492 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
3494 struct inode *inode = file_inode(file);
3495 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3496 struct btrfs_root *root = BTRFS_I(inode)->root;
3497 struct btrfs_root *new_root;
3498 struct btrfs_dir_item *di;
3499 struct btrfs_trans_handle *trans;
3500 struct btrfs_path *path = NULL;
3501 struct btrfs_disk_key disk_key;
3506 if (!capable(CAP_SYS_ADMIN))
3509 ret = mnt_want_write_file(file);
3513 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
3519 objectid = BTRFS_FS_TREE_OBJECTID;
3521 new_root = btrfs_get_fs_root(fs_info, objectid, true);
3522 if (IS_ERR(new_root)) {
3523 ret = PTR_ERR(new_root);
3526 if (!is_fstree(new_root->root_key.objectid)) {
3531 path = btrfs_alloc_path();
3537 trans = btrfs_start_transaction(root, 1);
3538 if (IS_ERR(trans)) {
3539 ret = PTR_ERR(trans);
3543 dir_id = btrfs_super_root_dir(fs_info->super_copy);
3544 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
3545 dir_id, "default", 7, 1);
3546 if (IS_ERR_OR_NULL(di)) {
3547 btrfs_release_path(path);
3548 btrfs_end_transaction(trans);
3550 "Umm, you don't have the default diritem, this isn't going to work");
3555 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
3556 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
3557 btrfs_mark_buffer_dirty(path->nodes[0]);
3558 btrfs_release_path(path);
3560 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
3561 btrfs_end_transaction(trans);
3563 btrfs_put_root(new_root);
3564 btrfs_free_path(path);
3566 mnt_drop_write_file(file);
3570 static void get_block_group_info(struct list_head *groups_list,
3571 struct btrfs_ioctl_space_info *space)
3573 struct btrfs_block_group *block_group;
3575 space->total_bytes = 0;
3576 space->used_bytes = 0;
3578 list_for_each_entry(block_group, groups_list, list) {
3579 space->flags = block_group->flags;
3580 space->total_bytes += block_group->length;
3581 space->used_bytes += block_group->used;
3585 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
3588 struct btrfs_ioctl_space_args space_args;
3589 struct btrfs_ioctl_space_info space;
3590 struct btrfs_ioctl_space_info *dest;
3591 struct btrfs_ioctl_space_info *dest_orig;
3592 struct btrfs_ioctl_space_info __user *user_dest;
3593 struct btrfs_space_info *info;
3594 static const u64 types[] = {
3595 BTRFS_BLOCK_GROUP_DATA,
3596 BTRFS_BLOCK_GROUP_SYSTEM,
3597 BTRFS_BLOCK_GROUP_METADATA,
3598 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
3606 if (copy_from_user(&space_args,
3607 (struct btrfs_ioctl_space_args __user *)arg,
3608 sizeof(space_args)))
3611 for (i = 0; i < num_types; i++) {
3612 struct btrfs_space_info *tmp;
3615 list_for_each_entry(tmp, &fs_info->space_info, list) {
3616 if (tmp->flags == types[i]) {
3625 down_read(&info->groups_sem);
3626 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3627 if (!list_empty(&info->block_groups[c]))
3630 up_read(&info->groups_sem);
3634 * Global block reserve, exported as a space_info
3638 /* space_slots == 0 means they are asking for a count */
3639 if (space_args.space_slots == 0) {
3640 space_args.total_spaces = slot_count;
3644 slot_count = min_t(u64, space_args.space_slots, slot_count);
3646 alloc_size = sizeof(*dest) * slot_count;
3648 /* we generally have at most 6 or so space infos, one for each raid
3649 * level. So, a whole page should be more than enough for everyone
3651 if (alloc_size > PAGE_SIZE)
3654 space_args.total_spaces = 0;
3655 dest = kmalloc(alloc_size, GFP_KERNEL);
3660 /* now we have a buffer to copy into */
3661 for (i = 0; i < num_types; i++) {
3662 struct btrfs_space_info *tmp;
3668 list_for_each_entry(tmp, &fs_info->space_info, list) {
3669 if (tmp->flags == types[i]) {
3677 down_read(&info->groups_sem);
3678 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3679 if (!list_empty(&info->block_groups[c])) {
3680 get_block_group_info(&info->block_groups[c],
3682 memcpy(dest, &space, sizeof(space));
3684 space_args.total_spaces++;
3690 up_read(&info->groups_sem);
3694 * Add global block reserve
3697 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3699 spin_lock(&block_rsv->lock);
3700 space.total_bytes = block_rsv->size;
3701 space.used_bytes = block_rsv->size - block_rsv->reserved;
3702 spin_unlock(&block_rsv->lock);
3703 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
3704 memcpy(dest, &space, sizeof(space));
3705 space_args.total_spaces++;
3708 user_dest = (struct btrfs_ioctl_space_info __user *)
3709 (arg + sizeof(struct btrfs_ioctl_space_args));
3711 if (copy_to_user(user_dest, dest_orig, alloc_size))
3716 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
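/*
 * The protocol above is two-pass; a sketch of the expected userspace usage
 * (hypothetical, for illustration only):
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);  // fills total_spaces
 *
 *	size_t sz = sizeof(probe) + probe.total_spaces *
 *		    sizeof(struct btrfs_ioctl_space_info);
 *	struct btrfs_ioctl_space_args *all = calloc(1, sz);
 *	all->space_slots = probe.total_spaces;
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, all);	// fills the trailing array
 */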
3722 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
3725 struct btrfs_trans_handle *trans;
3728 trans = btrfs_attach_transaction_barrier(root);
3729 if (IS_ERR(trans)) {
3730 if (PTR_ERR(trans) != -ENOENT)
3731 return PTR_ERR(trans);
3733 /* No running transaction, don't bother */
3734 transid = root->fs_info->last_trans_committed;
3737 transid = trans->transid;
3738 btrfs_commit_transaction_async(trans);
3741 if (copy_to_user(argp, &transid, sizeof(transid)))
3746 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
3752 if (copy_from_user(&transid, argp, sizeof(transid)))
3755 transid = 0; /* current trans */
3757 return btrfs_wait_for_commit(fs_info, transid);
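/*
 * These two ioctls pair up: a caller kicks off an asynchronous commit with
 * BTRFS_IOC_START_SYNC, saves the returned transid, and can later block
 * until exactly that transaction is committed:
 *
 *	__u64 transid;
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);
 *	...
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);  // transid 0: current
 */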
3760 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
3762 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
3763 struct btrfs_ioctl_scrub_args *sa;
3766 if (!capable(CAP_SYS_ADMIN))
3769 sa = memdup_user(arg, sizeof(*sa));
3773 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
3774 ret = mnt_want_write_file(file);
3779 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
3780 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
3784 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
3785 * error. This is important as it allows user space to know how much
3786 * progress scrub has done. For example, if scrub is canceled we get
3787 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
3788 * space. Later user space can inspect the progress from the structure
3789 * btrfs_ioctl_scrub_args and resume scrub from where it left off
3790 * previously (btrfs-progs does this).
3791 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
3792 * then return -EFAULT to signal the structure was not copied or it may
3793 * be corrupt and unreliable due to a partial copy.
3795 if (copy_to_user(arg, sa, sizeof(*sa)))
3798 if (!(sa->flags & BTRFS_SCRUB_READONLY))
3799 mnt_drop_write_file(file);
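/*
 * Hypothetical resume flow building on the comment above (stated as an
 * assumption about userspace, not something this file enforces): after a
 * cancelled run, a caller keeps sa->progress and derives a new sa->start
 * from it for the next BTRFS_IOC_SCRUB call, continuing from where the
 * previous scrub stopped.
 */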
3805 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
3807 if (!capable(CAP_SYS_ADMIN))
3810 return btrfs_scrub_cancel(fs_info);
3813 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
3816 struct btrfs_ioctl_scrub_args *sa;
3819 if (!capable(CAP_SYS_ADMIN))
3822 sa = memdup_user(arg, sizeof(*sa));
3826 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
3828 if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
3835 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
3838 struct btrfs_ioctl_get_dev_stats *sa;
3841 sa = memdup_user(arg, sizeof(*sa));
3845 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
3850 ret = btrfs_get_dev_stats(fs_info, sa);
3852 if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
3859 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
3862 struct btrfs_ioctl_dev_replace_args *p;
3865 if (!capable(CAP_SYS_ADMIN))
3868 p = memdup_user(arg, sizeof(*p));
3873 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
3874 if (sb_rdonly(fs_info->sb)) {
3878 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
3879 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3881 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
3882 btrfs_exclop_finish(fs_info);
3885 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
3886 btrfs_dev_replace_status(fs_info, p);
3889 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
3890 p->result = btrfs_dev_replace_cancel(fs_info);
3898 if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
3905 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
3911 struct btrfs_ioctl_ino_path_args *ipa = NULL;
3912 struct inode_fs_paths *ipath = NULL;
3913 struct btrfs_path *path;
3915 if (!capable(CAP_DAC_READ_SEARCH))
3918 path = btrfs_alloc_path();
3924 ipa = memdup_user(arg, sizeof(*ipa));
3931 size = min_t(u32, ipa->size, 4096);
3932 ipath = init_ipath(size, root, path);
3933 if (IS_ERR(ipath)) {
3934 ret = PTR_ERR(ipath);
3939 ret = paths_from_inode(ipa->inum, ipath);
3943 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
3944 rel_ptr = ipath->fspath->val[i] -
3945 (u64)(unsigned long)ipath->fspath->val;
3946 ipath->fspath->val[i] = rel_ptr;
3949 ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
3950 ipath->fspath, size);
3957 btrfs_free_path(path);
3964 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
3966 struct btrfs_data_container *inodes = ctx;
3967 const size_t c = 3 * sizeof(u64);
3969 if (inodes->bytes_left >= c) {
3970 inodes->bytes_left -= c;
3971 inodes->val[inodes->elem_cnt] = inum;
3972 inodes->val[inodes->elem_cnt + 1] = offset;
3973 inodes->val[inodes->elem_cnt + 2] = root;
3974 inodes->elem_cnt += 3;
3976 inodes->bytes_missing += c - inodes->bytes_left;
3977 inodes->bytes_left = 0;
3978 inodes->elem_missed += 3;
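/*
 * Each element appended above is an (inum, offset, root) triple, so the
 * val[] array handed back by BTRFS_IOC_LOGICAL_INO is consumed three u64s
 * at a time; elem_missed counts dropped entries in the same units.
 */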
3984 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
3985 void __user *arg, int version)
3989 struct btrfs_ioctl_logical_ino_args *loi;
3990 struct btrfs_data_container *inodes = NULL;
3991 struct btrfs_path *path = NULL;
3994 if (!capable(CAP_SYS_ADMIN))
3997 loi = memdup_user(arg, sizeof(*loi));
3999 return PTR_ERR(loi);
4002 ignore_offset = false;
4003 size = min_t(u32, loi->size, SZ_64K);
4005 /* All reserved bits must be 0 for now */
4006 if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
4010 /* Only accept flags we have defined so far */
4011 if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
4015 ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4016 size = min_t(u32, loi->size, SZ_16M);
4019 path = btrfs_alloc_path();
4025 inodes = init_data_container(size);
4026 if (IS_ERR(inodes)) {
4027 ret = PTR_ERR(inodes);
4032 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4033 build_ino_list, inodes, ignore_offset);
4039 ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
4045 btrfs_free_path(path);
4053 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4054 struct btrfs_ioctl_balance_args *bargs)
4056 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4058 bargs->flags = bctl->flags;
4060 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
4061 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4062 if (atomic_read(&fs_info->balance_pause_req))
4063 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4064 if (atomic_read(&fs_info->balance_cancel_req))
4065 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4067 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4068 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4069 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4071 spin_lock(&fs_info->balance_lock);
4072 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4073 spin_unlock(&fs_info->balance_lock);
4076 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4078 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4079 struct btrfs_fs_info *fs_info = root->fs_info;
4080 struct btrfs_ioctl_balance_args *bargs;
4081 struct btrfs_balance_control *bctl;
4082 bool need_unlock; /* for mut. excl. ops lock */
4087 "IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");
4089 if (!capable(CAP_SYS_ADMIN))
4092 ret = mnt_want_write_file(file);
4097 if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
4098 mutex_lock(&fs_info->balance_mutex);
4104 * mut. excl. ops lock is locked. Three possibilities:
4105 * (1) some other op is running
4106 * (2) balance is running
4107 * (3) balance is paused -- special case (think resume)
4109 mutex_lock(&fs_info->balance_mutex);
4110 if (fs_info->balance_ctl) {
4111 /* this is either (2) or (3) */
4112 if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4113 mutex_unlock(&fs_info->balance_mutex);
4115	 * Lock released to allow other waiters to continue;
4116	 * we'll reexamine the status.
4118 mutex_lock(&fs_info->balance_mutex);
4120 if (fs_info->balance_ctl &&
4121 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4123 need_unlock = false;
4127 mutex_unlock(&fs_info->balance_mutex);
4131 mutex_unlock(&fs_info->balance_mutex);
4137 mutex_unlock(&fs_info->balance_mutex);
4138 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4145 bargs = memdup_user(arg, sizeof(*bargs));
4146 if (IS_ERR(bargs)) {
4147 ret = PTR_ERR(bargs);
4151 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4152 if (!fs_info->balance_ctl) {
4157 bctl = fs_info->balance_ctl;
4158 spin_lock(&fs_info->balance_lock);
4159 bctl->flags |= BTRFS_BALANCE_RESUME;
4160 spin_unlock(&fs_info->balance_lock);
4161 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE);
4169 if (fs_info->balance_ctl) {
4174 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4181 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4182 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4183 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4185 bctl->flags = bargs->flags;
4187 /* balance everything - no filters */
4188 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4191 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4198 * Ownership of bctl and exclusive operation goes to btrfs_balance.
4199 * bctl is freed in reset_balance_state, or, if restriper was paused
4200 * all the way until unmount, in free_fs_info. The flag should be
4201 * cleared after reset_balance_state.
4203 need_unlock = false;
4205 ret = btrfs_balance(fs_info, bctl, bargs);
4208 if ((ret == 0 || ret == -ECANCELED) && arg) {
4209 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4218 mutex_unlock(&fs_info->balance_mutex);
4220 btrfs_exclop_finish(fs_info);
4222 mnt_drop_write_file(file);
4226 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4228 if (!capable(CAP_SYS_ADMIN))
4232 case BTRFS_BALANCE_CTL_PAUSE:
4233 return btrfs_pause_balance(fs_info);
4234 case BTRFS_BALANCE_CTL_CANCEL:
4235 return btrfs_cancel_balance(fs_info);
4241 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4244 struct btrfs_ioctl_balance_args *bargs;
4247 if (!capable(CAP_SYS_ADMIN))
4250 mutex_lock(&fs_info->balance_mutex);
4251 if (!fs_info->balance_ctl) {
4256 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4262 btrfs_update_ioctl_balance_args(fs_info, bargs);
4264 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4269 mutex_unlock(&fs_info->balance_mutex);
4273 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4275 struct inode *inode = file_inode(file);
4276 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4277 struct btrfs_ioctl_quota_ctl_args *sa;
4280 if (!capable(CAP_SYS_ADMIN))
4283 ret = mnt_want_write_file(file);
4287 sa = memdup_user(arg, sizeof(*sa));
4293 down_write(&fs_info->subvol_sem);
4296 case BTRFS_QUOTA_CTL_ENABLE:
4297 ret = btrfs_quota_enable(fs_info);
4299 case BTRFS_QUOTA_CTL_DISABLE:
4300 ret = btrfs_quota_disable(fs_info);
4308 up_write(&fs_info->subvol_sem);
4310 mnt_drop_write_file(file);
4314 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4316 struct inode *inode = file_inode(file);
4317 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4318 struct btrfs_root *root = BTRFS_I(inode)->root;
4319 struct btrfs_ioctl_qgroup_assign_args *sa;
4320 struct btrfs_trans_handle *trans;
4324 if (!capable(CAP_SYS_ADMIN))
4327 ret = mnt_want_write_file(file);
4331 sa = memdup_user(arg, sizeof(*sa));
4337 trans = btrfs_join_transaction(root);
4338 if (IS_ERR(trans)) {
4339 ret = PTR_ERR(trans);
4344 ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
4346 ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
4349 /* update qgroup status and info */
4350 err = btrfs_run_qgroups(trans);
4352 btrfs_handle_fs_error(fs_info, err,
4353 "failed to update qgroup status and info");
4354 err = btrfs_end_transaction(trans);
4361 mnt_drop_write_file(file);
4365 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
4367 struct inode *inode = file_inode(file);
4368 struct btrfs_root *root = BTRFS_I(inode)->root;
4369 struct btrfs_ioctl_qgroup_create_args *sa;
4370 struct btrfs_trans_handle *trans;
4374 if (!capable(CAP_SYS_ADMIN))
4377 ret = mnt_want_write_file(file);
4381 sa = memdup_user(arg, sizeof(*sa));
4387 if (!sa->qgroupid) {
4392 trans = btrfs_join_transaction(root);
4393 if (IS_ERR(trans)) {
4394 ret = PTR_ERR(trans);
4399 ret = btrfs_create_qgroup(trans, sa->qgroupid);
4401 ret = btrfs_remove_qgroup(trans, sa->qgroupid);
4404 err = btrfs_end_transaction(trans);
4411 mnt_drop_write_file(file);
4415 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
4417 struct inode *inode = file_inode(file);
4418 struct btrfs_root *root = BTRFS_I(inode)->root;
4419 struct btrfs_ioctl_qgroup_limit_args *sa;
4420 struct btrfs_trans_handle *trans;
4425 if (!capable(CAP_SYS_ADMIN))
4428 ret = mnt_want_write_file(file);
4432 sa = memdup_user(arg, sizeof(*sa));
4438 trans = btrfs_join_transaction(root);
4439 if (IS_ERR(trans)) {
4440 ret = PTR_ERR(trans);
4444 qgroupid = sa->qgroupid;
4446 /* take the current subvol as qgroup */
4447 qgroupid = root->root_key.objectid;
4450 ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
4452 err = btrfs_end_transaction(trans);
4459 mnt_drop_write_file(file);
4463 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
4465 struct inode *inode = file_inode(file);
4466 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4467 struct btrfs_ioctl_quota_rescan_args *qsa;
4470 if (!capable(CAP_SYS_ADMIN))
4473 ret = mnt_want_write_file(file);
4477 qsa = memdup_user(arg, sizeof(*qsa));
4488 ret = btrfs_qgroup_rescan(fs_info);
4493 mnt_drop_write_file(file);
4497 static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
4500 struct btrfs_ioctl_quota_rescan_args qsa = {0};
4502 if (!capable(CAP_SYS_ADMIN))
4505 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4507 qsa.progress = fs_info->qgroup_rescan_progress.objectid;
4510 if (copy_to_user(arg, &qsa, sizeof(qsa)))
4516 static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
4519 if (!capable(CAP_SYS_ADMIN))
4522 return btrfs_qgroup_wait_for_completion(fs_info, true);
4525 static long _btrfs_ioctl_set_received_subvol(struct file *file,
4526 struct user_namespace *mnt_userns,
4527 struct btrfs_ioctl_received_subvol_args *sa)
4529 struct inode *inode = file_inode(file);
4530 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4531 struct btrfs_root *root = BTRFS_I(inode)->root;
4532 struct btrfs_root_item *root_item = &root->root_item;
4533 struct btrfs_trans_handle *trans;
4534 struct timespec64 ct = current_time(inode);
4536 int received_uuid_changed;
4538 if (!inode_owner_or_capable(mnt_userns, inode))
4541 ret = mnt_want_write_file(file);
4545 down_write(&fs_info->subvol_sem);
4547 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
4552 if (btrfs_root_readonly(root)) {
4559 * 2 - uuid items (received uuid + subvol uuid)
4561 trans = btrfs_start_transaction(root, 3);
4562 if (IS_ERR(trans)) {
4563 ret = PTR_ERR(trans);
4568 sa->rtransid = trans->transid;
4569 sa->rtime.sec = ct.tv_sec;
4570 sa->rtime.nsec = ct.tv_nsec;
4572 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
4574 if (received_uuid_changed &&
4575 !btrfs_is_empty_uuid(root_item->received_uuid)) {
4576 ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
4577 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4578 root->root_key.objectid);
4579 if (ret && ret != -ENOENT) {
4580 btrfs_abort_transaction(trans, ret);
4581 btrfs_end_transaction(trans);
4585 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
4586 btrfs_set_root_stransid(root_item, sa->stransid);
4587 btrfs_set_root_rtransid(root_item, sa->rtransid);
4588 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
4589 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
4590 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
4591 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
4593 ret = btrfs_update_root(trans, fs_info->tree_root,
4594 &root->root_key, &root->root_item);
4596 btrfs_end_transaction(trans);
4599 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
4600 ret = btrfs_uuid_tree_add(trans, sa->uuid,
4601 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4602 root->root_key.objectid);
4603 if (ret < 0 && ret != -EEXIST) {
4604 btrfs_abort_transaction(trans, ret);
4605 btrfs_end_transaction(trans);
4609 ret = btrfs_commit_transaction(trans);
4611 up_write(&fs_info->subvol_sem);
4612 mnt_drop_write_file(file);
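/*
 * Summary of the uuid-tree bookkeeping above: if the received uuid actually
 * changed, the old BTRFS_UUID_KEY_RECEIVED_SUBVOL entry is removed first
 * (-ENOENT is tolerated) and, when the new uuid is non-empty, a fresh entry
 * is added once the root item has been updated.
 */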
4617 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
4620 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
4621 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
4624 args32 = memdup_user(arg, sizeof(*args32));
4626 return PTR_ERR(args32);
4628 args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
4634 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
4635 args64->stransid = args32->stransid;
4636 args64->rtransid = args32->rtransid;
4637 args64->stime.sec = args32->stime.sec;
4638 args64->stime.nsec = args32->stime.nsec;
4639 args64->rtime.sec = args32->rtime.sec;
4640 args64->rtime.nsec = args32->rtime.nsec;
4641 args64->flags = args32->flags;
4643 ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
4647 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
4648 args32->stransid = args64->stransid;
4649 args32->rtransid = args64->rtransid;
4650 args32->stime.sec = args64->stime.sec;
4651 args32->stime.nsec = args64->stime.nsec;
4652 args32->rtime.sec = args64->rtime.sec;
4653 args32->rtime.nsec = args64->rtime.nsec;
4654 args32->flags = args64->flags;
4656 ret = copy_to_user(arg, args32, sizeof(*args32));
4667 static long btrfs_ioctl_set_received_subvol(struct file *file,
4670 struct btrfs_ioctl_received_subvol_args *sa = NULL;
4673 sa = memdup_user(arg, sizeof(*sa));
4677 ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), sa);
4682 ret = copy_to_user(arg, sa, sizeof(*sa));
4691 static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
4696 char label[BTRFS_LABEL_SIZE];
4698 spin_lock(&fs_info->super_lock);
4699 memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
4700 spin_unlock(&fs_info->super_lock);
4702 len = strnlen(label, BTRFS_LABEL_SIZE);
4704 if (len == BTRFS_LABEL_SIZE) {
4706 "label is too long, return the first %zu bytes",
4710 ret = copy_to_user(arg, label, len);
4712 return ret ? -EFAULT : 0;
4715 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
4717 struct inode *inode = file_inode(file);
4718 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4719 struct btrfs_root *root = BTRFS_I(inode)->root;
4720 struct btrfs_super_block *super_block = fs_info->super_copy;
4721 struct btrfs_trans_handle *trans;
4722 char label[BTRFS_LABEL_SIZE];
4725 if (!capable(CAP_SYS_ADMIN))
4728 if (copy_from_user(label, arg, sizeof(label)))
4731 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
4733 "unable to set label with more than %d bytes",
4734 BTRFS_LABEL_SIZE - 1);
4738 ret = mnt_want_write_file(file);
4742 trans = btrfs_start_transaction(root, 0);
4743 if (IS_ERR(trans)) {
4744 ret = PTR_ERR(trans);
4748 spin_lock(&fs_info->super_lock);
4749 strcpy(super_block->label, label);
4750 spin_unlock(&fs_info->super_lock);
4751 ret = btrfs_commit_transaction(trans);
4754 mnt_drop_write_file(file);
4758 #define INIT_FEATURE_FLAGS(suffix) \
4759 { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
4760 .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
4761 .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
4763 int btrfs_ioctl_get_supported_features(void __user *arg)
4765 static const struct btrfs_ioctl_feature_flags features[3] = {
4766 INIT_FEATURE_FLAGS(SUPP),
4767 INIT_FEATURE_FLAGS(SAFE_SET),
4768 INIT_FEATURE_FLAGS(SAFE_CLEAR)
4771 if (copy_to_user(arg, &features, sizeof(features)))
4777 static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
4780 struct btrfs_super_block *super_block = fs_info->super_copy;
4781 struct btrfs_ioctl_feature_flags features;
4783 features.compat_flags = btrfs_super_compat_flags(super_block);
4784 features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
4785 features.incompat_flags = btrfs_super_incompat_flags(super_block);
4787 if (copy_to_user(arg, &features, sizeof(features)))
4793 static int check_feature_bits(struct btrfs_fs_info *fs_info,
4794 enum btrfs_feature_set set,
4795 u64 change_mask, u64 flags, u64 supported_flags,
4796 u64 safe_set, u64 safe_clear)
4798 const char *type = btrfs_feature_set_name(set);
4800 u64 disallowed, unsupported;
4801 u64 set_mask = flags & change_mask;
4802 u64 clear_mask = ~flags & change_mask;
4804 unsupported = set_mask & ~supported_flags;
4806 names = btrfs_printable_features(set, unsupported);
4809 "this kernel does not support the %s feature bit%s",
4810 names, strchr(names, ',') ? "s" : "");
4814 "this kernel does not support %s bits 0x%llx",
4819 disallowed = set_mask & ~safe_set;
4821 names = btrfs_printable_features(set, disallowed);
4824 "can't set the %s feature bit%s while mounted",
4825 names, strchr(names, ',') ? "s" : "");
4829 "can't set %s bits 0x%llx while mounted",
4834 disallowed = clear_mask & ~safe_clear;
4836 names = btrfs_printable_features(set, disallowed);
4839 "can't clear the %s feature bit%s while mounted",
4840 names, strchr(names, ',') ? "s" : "");
4844 "can't clear %s bits 0x%llx while mounted",
4852 #define check_feature(fs_info, change_mask, flags, mask_base) \
4853 check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
4854 BTRFS_FEATURE_ ## mask_base ## _SUPP, \
4855 BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
4856 BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
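/*
 * For example, check_feature(fs_info, change_mask, flags, COMPAT_RO)
 * expands to a check_feature_bits() call against
 * BTRFS_FEATURE_COMPAT_RO_SUPP, BTRFS_FEATURE_COMPAT_RO_SAFE_SET and
 * BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR.
 */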
4858 static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
4860 struct inode *inode = file_inode(file);
4861 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4862 struct btrfs_root *root = BTRFS_I(inode)->root;
4863 struct btrfs_super_block *super_block = fs_info->super_copy;
4864 struct btrfs_ioctl_feature_flags flags[2];
4865 struct btrfs_trans_handle *trans;
4869 if (!capable(CAP_SYS_ADMIN))
4872 if (copy_from_user(flags, arg, sizeof(flags)))
4876 if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
4877 !flags[0].incompat_flags)
4880 ret = check_feature(fs_info, flags[0].compat_flags,
4881 flags[1].compat_flags, COMPAT);
4885 ret = check_feature(fs_info, flags[0].compat_ro_flags,
4886 flags[1].compat_ro_flags, COMPAT_RO);
4890 ret = check_feature(fs_info, flags[0].incompat_flags,
4891 flags[1].incompat_flags, INCOMPAT);
4895 ret = mnt_want_write_file(file);
4899 trans = btrfs_start_transaction(root, 0);
4900 if (IS_ERR(trans)) {
4901 ret = PTR_ERR(trans);
4902 goto out_drop_write;
4905 spin_lock(&fs_info->super_lock);
4906 newflags = btrfs_super_compat_flags(super_block);
4907 newflags |= flags[0].compat_flags & flags[1].compat_flags;
4908 newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
4909 btrfs_set_super_compat_flags(super_block, newflags);
4911 newflags = btrfs_super_compat_ro_flags(super_block);
4912 newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
4913 newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
4914 btrfs_set_super_compat_ro_flags(super_block, newflags);
4916 newflags = btrfs_super_incompat_flags(super_block);
4917 newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
4918 newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
4919 btrfs_set_super_incompat_flags(super_block, newflags);
4920 spin_unlock(&fs_info->super_lock);
4922 ret = btrfs_commit_transaction(trans);
4924 mnt_drop_write_file(file);
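/*
 * Illustrative userspace sketch: flags[0] selects which feature bits to
 * touch and flags[1] supplies their new values, so setting one bit while
 * leaving everything else alone looks like (SOME_FEATURE_BIT is a
 * placeholder for a real BTRFS_FEATURE_* constant):
 *
 *	struct btrfs_ioctl_feature_flags flags[2] = {0};
 *	flags[0].compat_ro_flags = SOME_FEATURE_BIT;	// mask
 *	flags[1].compat_ro_flags = SOME_FEATURE_BIT;	// value (0 clears)
 *	ioctl(fd, BTRFS_IOC_SET_FEATURES, flags);
 */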
4929 static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
4931 struct btrfs_ioctl_send_args *arg;
4935 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
4936 struct btrfs_ioctl_send_args_32 args32;
4938 ret = copy_from_user(&args32, argp, sizeof(args32));
4941 arg = kzalloc(sizeof(*arg), GFP_KERNEL);
4944 arg->send_fd = args32.send_fd;
4945 arg->clone_sources_count = args32.clone_sources_count;
4946 arg->clone_sources = compat_ptr(args32.clone_sources);
4947 arg->parent_root = args32.parent_root;
4948 arg->flags = args32.flags;
4949 memcpy(arg->reserved, args32.reserved,
4950 sizeof(args32.reserved));
4955 arg = memdup_user(argp, sizeof(*arg));
4957 return PTR_ERR(arg);
4959 ret = btrfs_ioctl_send(file, arg);
4964 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4967 struct inode *inode = file_inode(file);
4968 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4969 struct btrfs_root *root = BTRFS_I(inode)->root;
4970 void __user *argp = (void __user *)arg;
4973 case FS_IOC_GETVERSION:
4974 return btrfs_ioctl_getversion(file, argp);
4975 case FS_IOC_GETFSLABEL:
4976 return btrfs_ioctl_get_fslabel(fs_info, argp);
4977 case FS_IOC_SETFSLABEL:
4978 return btrfs_ioctl_set_fslabel(file, argp);
4980 return btrfs_ioctl_fitrim(fs_info, argp);
4981 case BTRFS_IOC_SNAP_CREATE:
4982 return btrfs_ioctl_snap_create(file, argp, 0);
4983 case BTRFS_IOC_SNAP_CREATE_V2:
4984 return btrfs_ioctl_snap_create_v2(file, argp, 0);
4985 case BTRFS_IOC_SUBVOL_CREATE:
4986 return btrfs_ioctl_snap_create(file, argp, 1);
4987 case BTRFS_IOC_SUBVOL_CREATE_V2:
4988 return btrfs_ioctl_snap_create_v2(file, argp, 1);
4989 case BTRFS_IOC_SNAP_DESTROY:
4990 return btrfs_ioctl_snap_destroy(file, argp, false);
4991 case BTRFS_IOC_SNAP_DESTROY_V2:
4992 return btrfs_ioctl_snap_destroy(file, argp, true);
4993 case BTRFS_IOC_SUBVOL_GETFLAGS:
4994 return btrfs_ioctl_subvol_getflags(file, argp);
4995 case BTRFS_IOC_SUBVOL_SETFLAGS:
4996 return btrfs_ioctl_subvol_setflags(file, argp);
4997 case BTRFS_IOC_DEFAULT_SUBVOL:
4998 return btrfs_ioctl_default_subvol(file, argp);
4999 case BTRFS_IOC_DEFRAG:
5000 return btrfs_ioctl_defrag(file, NULL);
5001 case BTRFS_IOC_DEFRAG_RANGE:
5002 return btrfs_ioctl_defrag(file, argp);
5003 case BTRFS_IOC_RESIZE:
5004 return btrfs_ioctl_resize(file, argp);
5005 case BTRFS_IOC_ADD_DEV:
5006 return btrfs_ioctl_add_dev(fs_info, argp);
5007 case BTRFS_IOC_RM_DEV:
5008 return btrfs_ioctl_rm_dev(file, argp);
5009 case BTRFS_IOC_RM_DEV_V2:
5010 return btrfs_ioctl_rm_dev_v2(file, argp);
5011 case BTRFS_IOC_FS_INFO:
5012 return btrfs_ioctl_fs_info(fs_info, argp);
5013 case BTRFS_IOC_DEV_INFO:
5014 return btrfs_ioctl_dev_info(fs_info, argp);
5015 case BTRFS_IOC_BALANCE:
5016 return btrfs_ioctl_balance(file, NULL);
5017 case BTRFS_IOC_TREE_SEARCH:
5018 return btrfs_ioctl_tree_search(file, argp);
5019 case BTRFS_IOC_TREE_SEARCH_V2:
5020 return btrfs_ioctl_tree_search_v2(file, argp);
5021 case BTRFS_IOC_INO_LOOKUP:
5022 return btrfs_ioctl_ino_lookup(file, argp);
5023 case BTRFS_IOC_INO_PATHS:
5024 return btrfs_ioctl_ino_to_path(root, argp);
5025 case BTRFS_IOC_LOGICAL_INO:
5026 return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
5027 case BTRFS_IOC_LOGICAL_INO_V2:
5028 return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
5029 case BTRFS_IOC_SPACE_INFO:
5030 return btrfs_ioctl_space_info(fs_info, argp);
5031 case BTRFS_IOC_SYNC: {
5034 ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
5037 ret = btrfs_sync_fs(inode->i_sb, 1);
5039 * The transaction thread may want to do more work,
5040 * namely it pokes the cleaner kthread that will start
5041 * processing uncleaned subvols.
5043 wake_up_process(fs_info->transaction_kthread);
5046 case BTRFS_IOC_START_SYNC:
5047 return btrfs_ioctl_start_sync(root, argp);
5048 case BTRFS_IOC_WAIT_SYNC:
5049 return btrfs_ioctl_wait_sync(fs_info, argp);
5050 case BTRFS_IOC_SCRUB:
5051 return btrfs_ioctl_scrub(file, argp);
5052 case BTRFS_IOC_SCRUB_CANCEL:
5053 return btrfs_ioctl_scrub_cancel(fs_info);
5054 case BTRFS_IOC_SCRUB_PROGRESS:
5055 return btrfs_ioctl_scrub_progress(fs_info, argp);
5056 case BTRFS_IOC_BALANCE_V2:
5057 return btrfs_ioctl_balance(file, argp);
5058 case BTRFS_IOC_BALANCE_CTL:
5059 return btrfs_ioctl_balance_ctl(fs_info, arg);
5060 case BTRFS_IOC_BALANCE_PROGRESS:
5061 return btrfs_ioctl_balance_progress(fs_info, argp);
5062 case BTRFS_IOC_SET_RECEIVED_SUBVOL:
5063 return btrfs_ioctl_set_received_subvol(file, argp);
5065 case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
5066 return btrfs_ioctl_set_received_subvol_32(file, argp);
5068 case BTRFS_IOC_SEND:
5069 return _btrfs_ioctl_send(file, argp, false);
5070 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
5071 case BTRFS_IOC_SEND_32:
5072 return _btrfs_ioctl_send(file, argp, true);
5074 case BTRFS_IOC_GET_DEV_STATS:
5075 return btrfs_ioctl_get_dev_stats(fs_info, argp);
5076 case BTRFS_IOC_QUOTA_CTL:
5077 return btrfs_ioctl_quota_ctl(file, argp);
5078 case BTRFS_IOC_QGROUP_ASSIGN:
5079 return btrfs_ioctl_qgroup_assign(file, argp);
5080 case BTRFS_IOC_QGROUP_CREATE:
5081 return btrfs_ioctl_qgroup_create(file, argp);
5082 case BTRFS_IOC_QGROUP_LIMIT:
5083 return btrfs_ioctl_qgroup_limit(file, argp);
5084 case BTRFS_IOC_QUOTA_RESCAN:
5085 return btrfs_ioctl_quota_rescan(file, argp);
5086 case BTRFS_IOC_QUOTA_RESCAN_STATUS:
5087 return btrfs_ioctl_quota_rescan_status(fs_info, argp);
5088 case BTRFS_IOC_QUOTA_RESCAN_WAIT:
5089 return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
5090 case BTRFS_IOC_DEV_REPLACE:
5091 return btrfs_ioctl_dev_replace(fs_info, argp);
5092 case BTRFS_IOC_GET_SUPPORTED_FEATURES:
5093 return btrfs_ioctl_get_supported_features(argp);
5094 case BTRFS_IOC_GET_FEATURES:
5095 return btrfs_ioctl_get_features(fs_info, argp);
5096 case BTRFS_IOC_SET_FEATURES:
5097 return btrfs_ioctl_set_features(file, argp);
5098 case BTRFS_IOC_GET_SUBVOL_INFO:
5099 return btrfs_ioctl_get_subvol_info(file, argp);
5100 case BTRFS_IOC_GET_SUBVOL_ROOTREF:
5101 return btrfs_ioctl_get_subvol_rootref(file, argp);
5102 case BTRFS_IOC_INO_LOOKUP_USER:
5103 return btrfs_ioctl_ino_lookup_user(file, argp);
5104 case FS_IOC_ENABLE_VERITY:
5105 return fsverity_ioctl_enable(file, (const void __user *)argp);
5106 case FS_IOC_MEASURE_VERITY:
5107 return fsverity_ioctl_measure(file, argp);
5113 #ifdef CONFIG_COMPAT
5114 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5117 * These all access 32-bit values anyway so no further
5118 * handling is necessary.
5121 case FS_IOC32_GETVERSION:
5122 cmd = FS_IOC_GETVERSION;
5126 return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));