2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
24 #include <linux/fsnotify.h>
25 #include <linux/pagemap.h>
26 #include <linux/highmem.h>
27 #include <linux/time.h>
28 #include <linux/init.h>
29 #include <linux/string.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mount.h>
32 #include <linux/mpage.h>
33 #include <linux/namei.h>
34 #include <linux/swap.h>
35 #include <linux/writeback.h>
36 #include <linux/compat.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/security.h>
39 #include <linux/xattr.h>
41 #include <linux/slab.h>
42 #include <linux/blkdev.h>
43 #include <linux/uuid.h>
44 #include <linux/btrfs.h>
45 #include <linux/uaccess.h>
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
53 #include "inode-map.h"
55 #include "rcu-string.h"
57 #include "dev-replace.h"
62 #include "compression.h"
65 /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
66 * structures are incorrect, as the timespec structure from userspace
67 * is 4 bytes too small. We define these alternatives here to teach
68 * the kernel about the 32-bit struct packing.
70 struct btrfs_ioctl_timespec_32 {
73 } __attribute__ ((__packed__));
75 struct btrfs_ioctl_received_subvol_args_32 {
76 char uuid[BTRFS_UUID_SIZE]; /* in */
77 __u64 stransid; /* in */
78 __u64 rtransid; /* out */
79 struct btrfs_ioctl_timespec_32 stime; /* in */
80 struct btrfs_ioctl_timespec_32 rtime; /* out */
82 __u64 reserved[16]; /* in */
83 } __attribute__ ((__packed__));
85 #define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
86 struct btrfs_ioctl_received_subvol_args_32)
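/*
 * Note: _IOWR() encodes sizeof() of its argument type into the ioctl
 * number, so the packed 32-bit layout above yields a different ioctl
 * number than BTRFS_IOC_SET_RECEIVED_SUBVOL and the ioctl handler has
 * to recognize both variants.
 */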
90 static int btrfs_clone(struct inode *src, struct inode *inode,
91 u64 off, u64 olen, u64 olen_aligned, u64 destoff,
94 /* Mask out flags that are inappropriate for the given type of inode. */
95 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
99 else if (S_ISREG(mode))
100 return flags & ~FS_DIRSYNC_FL;
102 return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
106 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
108 static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
110 unsigned int iflags = 0;
112 if (flags & BTRFS_INODE_SYNC)
113 iflags |= FS_SYNC_FL;
114 if (flags & BTRFS_INODE_IMMUTABLE)
115 iflags |= FS_IMMUTABLE_FL;
116 if (flags & BTRFS_INODE_APPEND)
117 iflags |= FS_APPEND_FL;
118 if (flags & BTRFS_INODE_NODUMP)
119 iflags |= FS_NODUMP_FL;
120 if (flags & BTRFS_INODE_NOATIME)
121 iflags |= FS_NOATIME_FL;
122 if (flags & BTRFS_INODE_DIRSYNC)
123 iflags |= FS_DIRSYNC_FL;
124 if (flags & BTRFS_INODE_NODATACOW)
125 iflags |= FS_NOCOW_FL;
127 if (flags & BTRFS_INODE_NOCOMPRESS)
128 iflags |= FS_NOCOMP_FL;
129 else if (flags & BTRFS_INODE_COMPRESS)
130 iflags |= FS_COMPR_FL;
136 * Update inode->i_flags based on the btrfs internal flags.
138 void btrfs_update_iflags(struct inode *inode)
140 struct btrfs_inode *ip = BTRFS_I(inode);
141 unsigned int new_fl = 0;
143 if (ip->flags & BTRFS_INODE_SYNC)
145 if (ip->flags & BTRFS_INODE_IMMUTABLE)
146 new_fl |= S_IMMUTABLE;
147 if (ip->flags & BTRFS_INODE_APPEND)
149 if (ip->flags & BTRFS_INODE_NOATIME)
151 if (ip->flags & BTRFS_INODE_DIRSYNC)
154 set_mask_bits(&inode->i_flags,
155 S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
159 static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
161 struct btrfs_inode *ip = BTRFS_I(file_inode(file));
162 unsigned int flags = btrfs_flags_to_ioctl(ip->flags);
164 if (copy_to_user(arg, &flags, sizeof(flags)))
169 static int check_flags(unsigned int flags)
171 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
172 FS_NOATIME_FL | FS_NODUMP_FL | \
173 FS_SYNC_FL | FS_DIRSYNC_FL | \
174 FS_NOCOMP_FL | FS_COMPR_FL |
178 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
184 static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
186 struct inode *inode = file_inode(file);
187 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
188 struct btrfs_inode *ip = BTRFS_I(inode);
189 struct btrfs_root *root = ip->root;
190 struct btrfs_trans_handle *trans;
191 unsigned int flags, oldflags;
194 unsigned int i_oldflags;
197 if (!inode_owner_or_capable(inode))
200 if (btrfs_root_readonly(root))
203 if (copy_from_user(&flags, arg, sizeof(flags)))
206 ret = check_flags(flags);
210 ret = mnt_want_write_file(file);
216 ip_oldflags = ip->flags;
217 i_oldflags = inode->i_flags;
218 mode = inode->i_mode;
220 flags = btrfs_mask_flags(inode->i_mode, flags);
221 oldflags = btrfs_flags_to_ioctl(ip->flags);
222 if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
223 if (!capable(CAP_LINUX_IMMUTABLE)) {
229 if (flags & FS_SYNC_FL)
230 ip->flags |= BTRFS_INODE_SYNC;
232 ip->flags &= ~BTRFS_INODE_SYNC;
233 if (flags & FS_IMMUTABLE_FL)
234 ip->flags |= BTRFS_INODE_IMMUTABLE;
236 ip->flags &= ~BTRFS_INODE_IMMUTABLE;
237 if (flags & FS_APPEND_FL)
238 ip->flags |= BTRFS_INODE_APPEND;
240 ip->flags &= ~BTRFS_INODE_APPEND;
241 if (flags & FS_NODUMP_FL)
242 ip->flags |= BTRFS_INODE_NODUMP;
244 ip->flags &= ~BTRFS_INODE_NODUMP;
245 if (flags & FS_NOATIME_FL)
246 ip->flags |= BTRFS_INODE_NOATIME;
248 ip->flags &= ~BTRFS_INODE_NOATIME;
249 if (flags & FS_DIRSYNC_FL)
250 ip->flags |= BTRFS_INODE_DIRSYNC;
252 ip->flags &= ~BTRFS_INODE_DIRSYNC;
253 if (flags & FS_NOCOW_FL) {
256 * It's safe to turn csums off here because no extents exist yet.
257 * Otherwise we want the flag to reflect the real COW status of the
258 * file, so we do not set it.
260 if (inode->i_size == 0)
261 ip->flags |= BTRFS_INODE_NODATACOW
262 | BTRFS_INODE_NODATASUM;
264 ip->flags |= BTRFS_INODE_NODATACOW;
268 * Revert under the same assumptions as above.
271 if (inode->i_size == 0)
272 ip->flags &= ~(BTRFS_INODE_NODATACOW
273 | BTRFS_INODE_NODATASUM);
275 ip->flags &= ~BTRFS_INODE_NODATACOW;
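/*
 * NODATASUM follows NODATACOW here because nodatacow writes bypass
 * checksumming; only an empty file (i_size == 0, checked above) can have
 * csums toggled without leaving extents behind that lack valid checksums.
 */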
280 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
281 * flag may be changed automatically if the compression code won't make
* things smaller.
284 if (flags & FS_NOCOMP_FL) {
285 ip->flags &= ~BTRFS_INODE_COMPRESS;
286 ip->flags |= BTRFS_INODE_NOCOMPRESS;
288 ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
289 if (ret && ret != -ENODATA)
291 } else if (flags & FS_COMPR_FL) {
294 ip->flags |= BTRFS_INODE_COMPRESS;
295 ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
297 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
299 else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB)
303 ret = btrfs_set_prop(inode, "btrfs.compression",
304 comp, strlen(comp), 0);
309 ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
310 if (ret && ret != -ENODATA)
312 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
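/*
 * The compression setting is mirrored in the "btrfs.compression" property
 * (stored as an xattr), which is why the branches above call
 * btrfs_set_prop(): FS_COMPR_FL records the mount's compression type,
 * while the other two cases clear the property (-ENODATA just means it
 * was never set).
 */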
315 trans = btrfs_start_transaction(root, 1);
317 ret = PTR_ERR(trans);
321 btrfs_update_iflags(inode);
322 inode_inc_iversion(inode);
323 inode->i_ctime = current_time(inode);
324 ret = btrfs_update_inode(trans, root, inode);
326 btrfs_end_transaction(trans);
329 ip->flags = ip_oldflags;
330 inode->i_flags = i_oldflags;
335 mnt_drop_write_file(file);
339 static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
341 struct inode *inode = file_inode(file);
343 return put_user(inode->i_generation, arg);
346 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
348 struct inode *inode = file_inode(file);
349 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
350 struct btrfs_device *device;
351 struct request_queue *q;
352 struct fstrim_range range;
353 u64 minlen = ULLONG_MAX;
355 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
358 if (!capable(CAP_SYS_ADMIN))
362 list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
366 q = bdev_get_queue(device->bdev);
367 if (blk_queue_discard(q)) {
369 minlen = min_t(u64, q->limits.discard_granularity,
377 if (copy_from_user(&range, arg, sizeof(range)))
379 if (range.start > total_bytes ||
380 range.len < fs_info->sb->s_blocksize)
383 range.len = min(range.len, total_bytes - range.start);
384 range.minlen = max(range.minlen, minlen);
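/*
 * minlen at this point is the smallest discard granularity reported by
 * any discard-capable device (the loop above starts from ULLONG_MAX and
 * takes the minimum), and the range has been clamped so that
 * start + len does not run past the end of the filesystem.
 */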
385 ret = btrfs_trim_fs(fs_info, &range);
389 if (copy_to_user(arg, &range, sizeof(range)))
395 int btrfs_is_empty_uuid(u8 *uuid)
399 for (i = 0; i < BTRFS_UUID_SIZE; i++) {
406 static noinline int create_subvol(struct inode *dir,
407 struct dentry *dentry,
408 const char *name, int namelen,
410 struct btrfs_qgroup_inherit *inherit)
412 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
413 struct btrfs_trans_handle *trans;
414 struct btrfs_key key;
415 struct btrfs_root_item *root_item;
416 struct btrfs_inode_item *inode_item;
417 struct extent_buffer *leaf;
418 struct btrfs_root *root = BTRFS_I(dir)->root;
419 struct btrfs_root *new_root;
420 struct btrfs_block_rsv block_rsv;
421 struct timespec cur_time = current_time(dir);
426 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
431 root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
435 ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
440 * Don't create a subvolume whose level is not zero, or qgroup will be
441 * screwed up since it assumes the subvolume qgroup's level to be 0.
443 if (btrfs_qgroup_level(objectid)) {
448 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
450 * The same reservation as for snapshot creation; see the comment
451 * in create_snapshot().
453 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
454 8, &qgroup_reserved, false);
458 trans = btrfs_start_transaction(root, 0);
460 ret = PTR_ERR(trans);
461 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
464 trans->block_rsv = &block_rsv;
465 trans->bytes_reserved = block_rsv.size;
467 ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
471 leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
477 memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
478 btrfs_set_header_bytenr(leaf, leaf->start);
479 btrfs_set_header_generation(leaf, trans->transid);
480 btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
481 btrfs_set_header_owner(leaf, objectid);
483 write_extent_buffer_fsid(leaf, fs_info->fsid);
484 write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
485 btrfs_mark_buffer_dirty(leaf);
487 inode_item = &root_item->inode;
488 btrfs_set_stack_inode_generation(inode_item, 1);
489 btrfs_set_stack_inode_size(inode_item, 3);
490 btrfs_set_stack_inode_nlink(inode_item, 1);
491 btrfs_set_stack_inode_nbytes(inode_item,
493 btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
495 btrfs_set_root_flags(root_item, 0);
496 btrfs_set_root_limit(root_item, 0);
497 btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
499 btrfs_set_root_bytenr(root_item, leaf->start);
500 btrfs_set_root_generation(root_item, trans->transid);
501 btrfs_set_root_level(root_item, 0);
502 btrfs_set_root_refs(root_item, 1);
503 btrfs_set_root_used(root_item, leaf->len);
504 btrfs_set_root_last_snapshot(root_item, 0);
506 btrfs_set_root_generation_v2(root_item,
507 btrfs_root_generation(root_item));
508 uuid_le_gen(&new_uuid);
509 memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
510 btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
511 btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
512 root_item->ctime = root_item->otime;
513 btrfs_set_root_ctransid(root_item, trans->transid);
514 btrfs_set_root_otransid(root_item, trans->transid);
516 btrfs_tree_unlock(leaf);
517 free_extent_buffer(leaf);
520 btrfs_set_root_dirid(root_item, new_dirid);
522 key.objectid = objectid;
524 key.type = BTRFS_ROOT_ITEM_KEY;
525 ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
530 key.offset = (u64)-1;
531 new_root = btrfs_read_fs_root_no_name(fs_info, &key);
532 if (IS_ERR(new_root)) {
533 ret = PTR_ERR(new_root);
534 btrfs_abort_transaction(trans, ret);
538 btrfs_record_root_in_trans(trans, new_root);
540 ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
542 /* We potentially lose an unused inode item here */
543 btrfs_abort_transaction(trans, ret);
547 mutex_lock(&new_root->objectid_mutex);
548 new_root->highest_objectid = new_dirid;
549 mutex_unlock(&new_root->objectid_mutex);
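/*
 * The new tree only contains the root directory inode, so the subvolume's
 * free-objectid cursor starts at BTRFS_FIRST_FREE_OBJECTID; inode numbers
 * allocated inside the subvolume continue from there.
 */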
552 * insert the directory item
554 ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
556 btrfs_abort_transaction(trans, ret);
560 ret = btrfs_insert_dir_item(trans, root,
561 name, namelen, BTRFS_I(dir), &key,
562 BTRFS_FT_DIR, index);
564 btrfs_abort_transaction(trans, ret);
568 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
569 ret = btrfs_update_inode(trans, root, dir);
572 ret = btrfs_add_root_ref(trans, fs_info,
573 objectid, root->root_key.objectid,
574 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
577 ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid,
578 BTRFS_UUID_KEY_SUBVOL, objectid);
580 btrfs_abort_transaction(trans, ret);
584 trans->block_rsv = NULL;
585 trans->bytes_reserved = 0;
586 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
589 *async_transid = trans->transid;
590 err = btrfs_commit_transaction_async(trans, 1);
592 err = btrfs_commit_transaction(trans);
594 err = btrfs_commit_transaction(trans);
600 inode = btrfs_lookup_dentry(dir, dentry);
602 return PTR_ERR(inode);
603 d_instantiate(dentry, inode);
612 static void btrfs_wait_for_no_snapshotting_writes(struct btrfs_root *root)
618 prepare_to_wait(&root->subv_writers->wait, &wait,
619 TASK_UNINTERRUPTIBLE);
621 writers = percpu_counter_sum(&root->subv_writers->counter);
625 finish_wait(&root->subv_writers->wait, &wait);
629 static int create_snapshot(struct btrfs_root *root, struct inode *dir,
630 struct dentry *dentry,
631 u64 *async_transid, bool readonly,
632 struct btrfs_qgroup_inherit *inherit)
634 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
636 struct btrfs_pending_snapshot *pending_snapshot;
637 struct btrfs_trans_handle *trans;
640 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
643 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
644 if (!pending_snapshot)
647 pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
649 pending_snapshot->path = btrfs_alloc_path();
650 if (!pending_snapshot->root_item || !pending_snapshot->path) {
655 atomic_inc(&root->will_be_snapshotted);
656 smp_mb__after_atomic();
657 btrfs_wait_for_no_snapshotting_writes(root);
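/*
 * Once this returns, writers that started before will_be_snapshotted was
 * raised have drained, so the delalloc flush and the ordered extent wait
 * below operate on a stable set of dirty pages.
 */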
659 ret = btrfs_start_delalloc_inodes(root, 0);
663 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
665 btrfs_init_block_rsv(&pending_snapshot->block_rsv,
666 BTRFS_BLOCK_RSV_TEMP);
668 * 1 - parent dir inode
671 * 2 - root ref/backref
672 * 1 - root of snapshot
675 ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
676 &pending_snapshot->block_rsv, 8,
677 &pending_snapshot->qgroup_reserved,
682 pending_snapshot->dentry = dentry;
683 pending_snapshot->root = root;
684 pending_snapshot->readonly = readonly;
685 pending_snapshot->dir = dir;
686 pending_snapshot->inherit = inherit;
688 trans = btrfs_start_transaction(root, 0);
690 ret = PTR_ERR(trans);
694 spin_lock(&fs_info->trans_lock);
695 list_add(&pending_snapshot->list,
696 &trans->transaction->pending_snapshots);
697 spin_unlock(&fs_info->trans_lock);
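/*
 * The snapshot itself is created while this transaction commits (the
 * transaction's pending_snapshots list is processed during the commit),
 * so all that is left here is to commit and then look up the new dentry.
 */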
699 *async_transid = trans->transid;
700 ret = btrfs_commit_transaction_async(trans, 1);
702 ret = btrfs_commit_transaction(trans);
704 ret = btrfs_commit_transaction(trans);
709 ret = pending_snapshot->error;
713 ret = btrfs_orphan_cleanup(pending_snapshot->snap);
717 inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
719 ret = PTR_ERR(inode);
723 d_instantiate(dentry, inode);
726 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
728 if (atomic_dec_and_test(&root->will_be_snapshotted))
729 wake_up_atomic_t(&root->will_be_snapshotted);
731 kfree(pending_snapshot->root_item);
732 btrfs_free_path(pending_snapshot->path);
733 kfree(pending_snapshot);
738 /* copy of may_delete() in fs/namei.c
739 * Check whether we can remove a link victim from directory dir, check
740 * whether the type of victim is right.
741 * 1. We can't do it if dir is read-only (done in permission())
742 * 2. We should have write and exec permissions on dir
743 * 3. We can't remove anything from append-only dir
744 * 4. We can't do anything with immutable dir (done in permission())
745 * 5. If the sticky bit on dir is set we should either
746 * a. be owner of dir, or
747 * b. be owner of victim, or
748 * c. have CAP_FOWNER capability
749 * 6. If the victim is append-only or immutable we can't do anything with
750 * links pointing to it.
751 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
752 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
753 * 9. We can't remove a root or mountpoint.
754 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
755 * nfs_async_unlink().
758 static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
762 if (d_really_is_negative(victim))
765 BUG_ON(d_inode(victim->d_parent) != dir);
766 audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
768 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
773 if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
774 IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
777 if (!d_is_dir(victim))
781 } else if (d_is_dir(victim))
785 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
790 /* copy of may_create() in fs/namei.c */
791 static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
793 if (d_really_is_positive(child))
797 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
801 * Create a new subvolume below @parent. This is largely modeled after
802 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
803 * inside this filesystem so it's quite a bit simpler.
805 static noinline int btrfs_mksubvol(const struct path *parent,
806 const char *name, int namelen,
807 struct btrfs_root *snap_src,
808 u64 *async_transid, bool readonly,
809 struct btrfs_qgroup_inherit *inherit)
811 struct inode *dir = d_inode(parent->dentry);
812 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
813 struct dentry *dentry;
816 error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
820 dentry = lookup_one_len(name, parent->dentry, namelen);
821 error = PTR_ERR(dentry);
825 error = btrfs_may_create(dir, dentry);
830 * Even if this name doesn't exist, we may get hash collisions.
831 * Check for them now, while we can still fail safely.
833 error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
839 down_read(&fs_info->subvol_sem);
841 if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
845 error = create_snapshot(snap_src, dir, dentry,
846 async_transid, readonly, inherit);
848 error = create_subvol(dir, dentry, name, namelen,
849 async_transid, inherit);
852 fsnotify_mkdir(dir, dentry);
854 up_read(&fs_info->subvol_sem);
863 * When we're defragging a range, we don't want to kick it off again
864 * if it is really just waiting for delalloc to send it down.
865 * If we find a nice big extent or delalloc range for the bytes in the
866 * file you want to defrag, we return 0 to let you know to skip this part of the file.
869 static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
871 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
872 struct extent_map *em = NULL;
873 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
876 read_lock(&em_tree->lock);
877 em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
878 read_unlock(&em_tree->lock);
881 end = extent_map_end(em);
883 if (end - offset > thresh)
886 /* if we already have a nice delalloc here, just stop */
888 end = count_range_bits(io_tree, &offset, offset + thresh,
889 thresh, EXTENT_DELALLOC, 1);
896 * helper function to walk through a file and find extents
897 * newer than a specific transid, and smaller than thresh.
899 * This is used by the defragging code to find new and small extents.
902 static int find_new_extents(struct btrfs_root *root,
903 struct inode *inode, u64 newer_than,
904 u64 *off, u32 thresh)
906 struct btrfs_path *path;
907 struct btrfs_key min_key;
908 struct extent_buffer *leaf;
909 struct btrfs_file_extent_item *extent;
912 u64 ino = btrfs_ino(BTRFS_I(inode));
914 path = btrfs_alloc_path();
918 min_key.objectid = ino;
919 min_key.type = BTRFS_EXTENT_DATA_KEY;
920 min_key.offset = *off;
923 ret = btrfs_search_forward(root, &min_key, path, newer_than);
927 if (min_key.objectid != ino)
929 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
932 leaf = path->nodes[0];
933 extent = btrfs_item_ptr(leaf, path->slots[0],
934 struct btrfs_file_extent_item);
936 type = btrfs_file_extent_type(leaf, extent);
937 if (type == BTRFS_FILE_EXTENT_REG &&
938 btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
939 check_defrag_in_cache(inode, min_key.offset, thresh)) {
940 *off = min_key.offset;
941 btrfs_free_path(path);
946 if (path->slots[0] < btrfs_header_nritems(leaf)) {
947 btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
951 if (min_key.offset == (u64)-1)
955 btrfs_release_path(path);
958 btrfs_free_path(path);
962 static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
964 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
965 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
966 struct extent_map *em;
970 * hopefully we have this extent in the tree already, try without
971 * the full extent lock
973 read_lock(&em_tree->lock);
974 em = lookup_extent_mapping(em_tree, start, len);
975 read_unlock(&em_tree->lock);
978 struct extent_state *cached = NULL;
979 u64 end = start + len - 1;
981 /* get the big lock and read metadata off disk */
982 lock_extent_bits(io_tree, start, end, &cached);
983 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
984 unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
993 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
995 struct extent_map *next;
998 /* this is the last extent */
999 if (em->start + em->len >= i_size_read(inode))
1002 next = defrag_lookup_extent(inode, em->start + em->len);
1003 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
1005 else if ((em->block_start + em->block_len == next->block_start) &&
1006 (em->block_len > SZ_128K && next->block_len > SZ_128K))
1009 free_extent_map(next);
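/*
 * "Mergeable" means defragging could join this extent with the next one:
 * the next extent must be a real on-disk extent, and the two must not
 * already be physically contiguous with both sides larger than 128K.
 */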
1013 static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
1014 u64 *last_len, u64 *skip, u64 *defrag_end,
1017 struct extent_map *em;
1019 bool next_mergeable = true;
1020 bool prev_mergeable = true;
1023 * Make sure that once we start defragging an extent, we keep on defragging it.
1026 if (start < *defrag_end)
1031 em = defrag_lookup_extent(inode, start);
1035 /* this will cover holes, and inline extents */
1036 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1042 prev_mergeable = false;
1044 next_mergeable = defrag_check_next_extent(inode, em);
1046 * We hit a real extent; if it is big, or the next extent is not a
1047 * real extent, don't bother defragging it.
1049 if (!compress && (*last_len == 0 || *last_len >= thresh) &&
1050 (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
1054 * last_len ends up being a counter of how many bytes we've defragged.
1055 * every time we choose not to defrag an extent, we reset *last_len
1056 * so that the next tiny extent will force a defrag.
1058 * The end result of this is that tiny extents before a single big
1059 * extent will force at least part of that big extent to be defragged.
1062 *defrag_end = extent_map_end(em);
1065 *skip = extent_map_end(em);
1069 free_extent_map(em);
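/*
 * On a skip, *skip is set to the end of this extent so the caller can
 * jump straight past it; on a defrag decision, *defrag_end records how
 * far the decision extends so later calls keep defragging the same
 * extent (see the start < *defrag_end check above).
 */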
1074 * It doesn't do much good to defrag one or two pages
1075 * at a time. This pulls in a nice chunk of pages
1076 * to COW and defrag.
1078 * It also makes sure the delalloc code has enough
1079 * dirty data to avoid making new small extents as part of the defrag.
1082 * It's a good idea to start RA on this range
1083 * before calling this.
1085 static int cluster_pages_for_defrag(struct inode *inode,
1086 struct page **pages,
1087 unsigned long start_index,
1088 unsigned long num_pages)
1090 unsigned long file_end;
1091 u64 isize = i_size_read(inode);
1098 struct btrfs_ordered_extent *ordered;
1099 struct extent_state *cached_state = NULL;
1100 struct extent_io_tree *tree;
1101 struct extent_changeset *data_reserved = NULL;
1102 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1104 file_end = (isize - 1) >> PAGE_SHIFT;
1105 if (!isize || start_index > file_end)
1108 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
1110 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
1111 start_index << PAGE_SHIFT,
1112 page_cnt << PAGE_SHIFT);
1116 tree = &BTRFS_I(inode)->io_tree;
1118 /* step one, lock all the pages */
1119 for (i = 0; i < page_cnt; i++) {
1122 page = find_or_create_page(inode->i_mapping,
1123 start_index + i, mask);
1127 page_start = page_offset(page);
1128 page_end = page_start + PAGE_SIZE - 1;
1130 lock_extent_bits(tree, page_start, page_end,
1132 ordered = btrfs_lookup_ordered_extent(inode,
1134 unlock_extent_cached(tree, page_start, page_end,
1135 &cached_state, GFP_NOFS);
1140 btrfs_start_ordered_extent(inode, ordered, 1);
1141 btrfs_put_ordered_extent(ordered);
1144 * We unlocked the page above, so we need to check if
1145 * it was released or not.
1147 if (page->mapping != inode->i_mapping) {
1154 if (!PageUptodate(page)) {
1155 btrfs_readpage(NULL, page);
1157 if (!PageUptodate(page)) {
1165 if (page->mapping != inode->i_mapping) {
1177 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1181 * So now we have a nice long stream of locked
1182 * and up-to-date pages; let's wait on them.
1184 for (i = 0; i < i_done; i++)
1185 wait_on_page_writeback(pages[i]);
1187 page_start = page_offset(pages[0]);
1188 page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
1190 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1191 page_start, page_end - 1, &cached_state);
1192 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
1193 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1194 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
1195 &cached_state, GFP_NOFS);
1197 if (i_done != page_cnt) {
1198 spin_lock(&BTRFS_I(inode)->lock);
1199 BTRFS_I(inode)->outstanding_extents++;
1200 spin_unlock(&BTRFS_I(inode)->lock);
1201 btrfs_delalloc_release_space(inode, data_reserved,
1202 start_index << PAGE_SHIFT,
1203 (page_cnt - i_done) << PAGE_SHIFT);
1207 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1210 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1211 page_start, page_end - 1, &cached_state,
1214 for (i = 0; i < i_done; i++) {
1215 clear_page_dirty_for_io(pages[i]);
1216 ClearPageChecked(pages[i]);
1217 set_page_extent_mapped(pages[i]);
1218 set_page_dirty(pages[i]);
1219 unlock_page(pages[i]);
1222 extent_changeset_free(data_reserved);
1225 for (i = 0; i < i_done; i++) {
1226 unlock_page(pages[i]);
1229 btrfs_delalloc_release_space(inode, data_reserved,
1230 start_index << PAGE_SHIFT,
1231 page_cnt << PAGE_SHIFT);
1232 extent_changeset_free(data_reserved);
1237 int btrfs_defrag_file(struct inode *inode, struct file *file,
1238 struct btrfs_ioctl_defrag_range_args *range,
1239 u64 newer_than, unsigned long max_to_defrag)
1241 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1242 struct btrfs_root *root = BTRFS_I(inode)->root;
1243 struct file_ra_state *ra = NULL;
1244 unsigned long last_index;
1245 u64 isize = i_size_read(inode);
1249 u64 newer_off = range->start;
1251 unsigned long ra_index = 0;
1253 int defrag_count = 0;
1254 int compress_type = BTRFS_COMPRESS_ZLIB;
1255 u32 extent_thresh = range->extent_thresh;
1256 unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1257 unsigned long cluster = max_cluster;
1258 u64 new_align = ~((u64)SZ_128K - 1);
1259 struct page **pages = NULL;
1260 bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
1265 if (range->start >= isize)
1269 if (range->compress_type > BTRFS_COMPRESS_TYPES)
1271 if (range->compress_type)
1272 compress_type = range->compress_type;
1275 if (extent_thresh == 0)
1276 extent_thresh = SZ_256K;
1279 * If we were not given a file, allocate a readahead context. As
1280 * readahead is just an optimization, defrag will work without it so
1281 * we don't error out.
1284 ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1286 file_ra_state_init(ra, inode->i_mapping);
1291 pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
1297 /* find the last page to defrag */
1298 if (range->start + range->len > range->start) {
1299 last_index = min_t(u64, isize - 1,
1300 range->start + range->len - 1) >> PAGE_SHIFT;
1302 last_index = (isize - 1) >> PAGE_SHIFT;
1306 ret = find_new_extents(root, inode, newer_than,
1307 &newer_off, SZ_64K);
1309 range->start = newer_off;
1311 * we always align our defrag to help keep
1312 * the extents in the file evenly spaced
1314 i = (newer_off & new_align) >> PAGE_SHIFT;
1318 i = range->start >> PAGE_SHIFT;
1321 max_to_defrag = last_index - i + 1;
1324 * Make writeback start from i, so the defrag range can be
1325 * written sequentially.
1327 if (i < inode->i_mapping->writeback_index)
1328 inode->i_mapping->writeback_index = i;
1330 while (i <= last_index && defrag_count < max_to_defrag &&
1331 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
1333 * Make sure we stop running if someone unmounts the filesystem.
1336 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1339 if (btrfs_defrag_cancelled(fs_info)) {
1340 btrfs_debug(fs_info, "defrag_file cancelled");
1345 if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
1346 extent_thresh, &last_len, &skip,
1347 &defrag_end, do_compress)){
1350 * the should_defrag function tells us how much to skip;
1351 * bump our counter by the suggested amount.
1353 next = DIV_ROUND_UP(skip, PAGE_SIZE);
1354 i = max(i + 1, next);
1359 cluster = (PAGE_ALIGN(defrag_end) >>
1361 cluster = min(cluster, max_cluster);
1363 cluster = max_cluster;
1366 if (i + cluster > ra_index) {
1367 ra_index = max(i, ra_index);
1369 page_cache_sync_readahead(inode->i_mapping, ra,
1370 file, ra_index, cluster);
1371 ra_index += cluster;
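/*
 * Readahead is kicked off one cluster ahead of the pages we are about to
 * lock, so they are hopefully already in flight by the time
 * cluster_pages_for_defrag() needs them; it is best effort only, since
 * the readahead state may not have been allocated (see above).
 */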
1376 BTRFS_I(inode)->defrag_compress = compress_type;
1377 ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1379 inode_unlock(inode);
1383 defrag_count += ret;
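/*
 * ret is the number of pages cluster_pages_for_defrag() dirtied and
 * tagged EXTENT_DEFRAG; the actual relocation happens later when
 * writeback COWs them, so just throttle dirty pages and move on.
 */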
1384 balance_dirty_pages_ratelimited(inode->i_mapping);
1385 inode_unlock(inode);
1388 if (newer_off == (u64)-1)
1394 newer_off = max(newer_off + 1,
1395 (u64)i << PAGE_SHIFT);
1397 ret = find_new_extents(root, inode, newer_than,
1398 &newer_off, SZ_64K);
1400 range->start = newer_off;
1401 i = (newer_off & new_align) >> PAGE_SHIFT;
1408 last_len += ret << PAGE_SHIFT;
1416 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
1417 filemap_flush(inode->i_mapping);
1418 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1419 &BTRFS_I(inode)->runtime_flags))
1420 filemap_flush(inode->i_mapping);
1424 /* the filemap_flush will queue IO into the worker threads, but
1425 * we have to make sure the IO is actually started and that
1426 * ordered extents get created before we return
1428 atomic_inc(&fs_info->async_submit_draining);
1429 while (atomic_read(&fs_info->nr_async_submits) ||
1430 atomic_read(&fs_info->async_delalloc_pages)) {
1431 wait_event(fs_info->async_submit_wait,
1432 (atomic_read(&fs_info->nr_async_submits) == 0 &&
1433 atomic_read(&fs_info->async_delalloc_pages) == 0));
1435 atomic_dec(&fs_info->async_submit_draining);
1438 if (range->compress_type == BTRFS_COMPRESS_LZO) {
1439 btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1440 } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
1441 btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1449 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1450 inode_unlock(inode);
1458 static noinline int btrfs_ioctl_resize(struct file *file,
1461 struct inode *inode = file_inode(file);
1462 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1466 struct btrfs_root *root = BTRFS_I(inode)->root;
1467 struct btrfs_ioctl_vol_args *vol_args;
1468 struct btrfs_trans_handle *trans;
1469 struct btrfs_device *device = NULL;
1472 char *devstr = NULL;
1476 if (!capable(CAP_SYS_ADMIN))
1479 ret = mnt_want_write_file(file);
1483 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
1484 mnt_drop_write_file(file);
1485 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1488 mutex_lock(&fs_info->volume_mutex);
1489 vol_args = memdup_user(arg, sizeof(*vol_args));
1490 if (IS_ERR(vol_args)) {
1491 ret = PTR_ERR(vol_args);
1495 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
1497 sizestr = vol_args->name;
1498 devstr = strchr(sizestr, ':');
1500 sizestr = devstr + 1;
1502 devstr = vol_args->name;
1503 ret = kstrtoull(devstr, 10, &devid);
1510 btrfs_info(fs_info, "resizing devid %llu", devid);
1513 device = btrfs_find_device(fs_info, devid, NULL, NULL);
1515 btrfs_info(fs_info, "resizer unable to find device %llu",
1521 if (!device->writeable) {
1523 "resizer unable to apply on readonly device %llu",
1529 if (!strcmp(sizestr, "max"))
1530 new_size = device->bdev->bd_inode->i_size;
1532 if (sizestr[0] == '-') {
1535 } else if (sizestr[0] == '+') {
1539 new_size = memparse(sizestr, &retptr);
1540 if (*retptr != '\0' || new_size == 0) {
1546 if (device->is_tgtdev_for_dev_replace) {
1551 old_size = btrfs_device_get_total_bytes(device);
1554 if (new_size > old_size) {
1558 new_size = old_size - new_size;
1559 } else if (mod > 0) {
1560 if (new_size > ULLONG_MAX - old_size) {
1564 new_size = old_size + new_size;
1567 if (new_size < SZ_256M) {
1571 if (new_size > device->bdev->bd_inode->i_size) {
1576 new_size = round_down(new_size, fs_info->sectorsize);
1578 btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
1579 rcu_str_deref(device->name), new_size);
1581 if (new_size > old_size) {
1582 trans = btrfs_start_transaction(root, 0);
1583 if (IS_ERR(trans)) {
1584 ret = PTR_ERR(trans);
1587 ret = btrfs_grow_device(trans, device, new_size);
1588 btrfs_commit_transaction(trans);
1589 } else if (new_size < old_size) {
1590 ret = btrfs_shrink_device(device, new_size);
1591 } /* equal, nothing to do */
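/*
 * To recap the parsing above: vol_args->name is "[devid:]size", e.g.
 * "3:max", "+1g" or "-500m". memparse() accepts the usual k/m/g/...
 * suffixes, "max" grows to the device size, and a leading '+'/'-' makes
 * the size relative to the current device size.
 */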
1596 mutex_unlock(&fs_info->volume_mutex);
1597 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
1598 mnt_drop_write_file(file);
1602 static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1603 const char *name, unsigned long fd, int subvol,
1604 u64 *transid, bool readonly,
1605 struct btrfs_qgroup_inherit *inherit)
1610 if (!S_ISDIR(file_inode(file)->i_mode))
1613 ret = mnt_want_write_file(file);
1617 namelen = strlen(name);
1618 if (strchr(name, '/')) {
1620 goto out_drop_write;
1623 if (name[0] == '.' &&
1624 (namelen == 1 || (name[1] == '.' && namelen == 2))) {
1626 goto out_drop_write;
1630 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1631 NULL, transid, readonly, inherit);
1633 struct fd src = fdget(fd);
1634 struct inode *src_inode;
1637 goto out_drop_write;
1640 src_inode = file_inode(src.file);
1641 if (src_inode->i_sb != file_inode(file)->i_sb) {
1642 btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1643 "Snapshot src from another FS");
1645 } else if (!inode_owner_or_capable(src_inode)) {
1647 * Subvolume creation is not restricted, but snapshots
1648 * are limited to the caller's own subvolumes.
1652 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1653 BTRFS_I(src_inode)->root,
1654 transid, readonly, inherit);
1659 mnt_drop_write_file(file);
1664 static noinline int btrfs_ioctl_snap_create(struct file *file,
1665 void __user *arg, int subvol)
1667 struct btrfs_ioctl_vol_args *vol_args;
1670 if (!S_ISDIR(file_inode(file)->i_mode))
1673 vol_args = memdup_user(arg, sizeof(*vol_args));
1674 if (IS_ERR(vol_args))
1675 return PTR_ERR(vol_args);
1676 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
1678 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
1679 vol_args->fd, subvol,
1686 static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1687 void __user *arg, int subvol)
1689 struct btrfs_ioctl_vol_args_v2 *vol_args;
1693 bool readonly = false;
1694 struct btrfs_qgroup_inherit *inherit = NULL;
1696 if (!S_ISDIR(file_inode(file)->i_mode))
1699 vol_args = memdup_user(arg, sizeof(*vol_args));
1700 if (IS_ERR(vol_args))
1701 return PTR_ERR(vol_args);
1702 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
1704 if (vol_args->flags &
1705 ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
1706 BTRFS_SUBVOL_QGROUP_INHERIT)) {
1711 if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
1713 if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
1715 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1716 if (vol_args->size > PAGE_SIZE) {
1720 inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
1721 if (IS_ERR(inherit)) {
1722 ret = PTR_ERR(inherit);
1727 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
1728 vol_args->fd, subvol, ptr,
1733 if (ptr && copy_to_user(arg +
1734 offsetof(struct btrfs_ioctl_vol_args_v2,
1746 static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
1749 struct inode *inode = file_inode(file);
1750 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1751 struct btrfs_root *root = BTRFS_I(inode)->root;
1755 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
1758 down_read(&fs_info->subvol_sem);
1759 if (btrfs_root_readonly(root))
1760 flags |= BTRFS_SUBVOL_RDONLY;
1761 up_read(&fs_info->subvol_sem);
1763 if (copy_to_user(arg, &flags, sizeof(flags)))
1769 static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1772 struct inode *inode = file_inode(file);
1773 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1774 struct btrfs_root *root = BTRFS_I(inode)->root;
1775 struct btrfs_trans_handle *trans;
1780 if (!inode_owner_or_capable(inode))
1783 ret = mnt_want_write_file(file);
1787 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
1789 goto out_drop_write;
1792 if (copy_from_user(&flags, arg, sizeof(flags))) {
1794 goto out_drop_write;
1797 if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
1799 goto out_drop_write;
1802 if (flags & ~BTRFS_SUBVOL_RDONLY) {
1804 goto out_drop_write;
1807 down_write(&fs_info->subvol_sem);
1810 if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
1813 root_flags = btrfs_root_flags(&root->root_item);
1814 if (flags & BTRFS_SUBVOL_RDONLY) {
1815 btrfs_set_root_flags(&root->root_item,
1816 root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
1819 * Block RO -> RW transition if this subvolume is involved in an ongoing send.
1822 spin_lock(&root->root_item_lock);
1823 if (root->send_in_progress == 0) {
1824 btrfs_set_root_flags(&root->root_item,
1825 root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
1826 spin_unlock(&root->root_item_lock);
1828 spin_unlock(&root->root_item_lock);
1830 "Attempt to set subvolume %llu read-write during send",
1831 root->root_key.objectid);
1837 trans = btrfs_start_transaction(root, 1);
1838 if (IS_ERR(trans)) {
1839 ret = PTR_ERR(trans);
1843 ret = btrfs_update_root(trans, fs_info->tree_root,
1844 &root->root_key, &root->root_item);
1846 btrfs_end_transaction(trans);
1850 ret = btrfs_commit_transaction(trans);
1854 btrfs_set_root_flags(&root->root_item, root_flags);
1856 up_write(&fs_info->subvol_sem);
1858 mnt_drop_write_file(file);
1864 * helper to check if the subvolume references other subvolumes
1866 static noinline int may_destroy_subvol(struct btrfs_root *root)
1868 struct btrfs_fs_info *fs_info = root->fs_info;
1869 struct btrfs_path *path;
1870 struct btrfs_dir_item *di;
1871 struct btrfs_key key;
1875 path = btrfs_alloc_path();
1879 /* Make sure this root isn't set as the default subvol */
1880 dir_id = btrfs_super_root_dir(fs_info->super_copy);
1881 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
1882 dir_id, "default", 7, 0);
1883 if (di && !IS_ERR(di)) {
1884 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1885 if (key.objectid == root->root_key.objectid) {
1888 "deleting default subvolume %llu is not allowed",
1892 btrfs_release_path(path);
1895 key.objectid = root->root_key.objectid;
1896 key.type = BTRFS_ROOT_REF_KEY;
1897 key.offset = (u64)-1;
1899 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1905 if (path->slots[0] > 0) {
1907 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1908 if (key.objectid == root->root_key.objectid &&
1909 key.type == BTRFS_ROOT_REF_KEY)
1913 btrfs_free_path(path);
1917 static noinline int key_in_sk(struct btrfs_key *key,
1918 struct btrfs_ioctl_search_key *sk)
1920 struct btrfs_key test;
1923 test.objectid = sk->min_objectid;
1924 test.type = sk->min_type;
1925 test.offset = sk->min_offset;
1927 ret = btrfs_comp_cpu_keys(key, &test);
1931 test.objectid = sk->max_objectid;
1932 test.type = sk->max_type;
1933 test.offset = sk->max_offset;
1935 ret = btrfs_comp_cpu_keys(key, &test);
1941 static noinline int copy_to_sk(struct btrfs_path *path,
1942 struct btrfs_key *key,
1943 struct btrfs_ioctl_search_key *sk,
1946 unsigned long *sk_offset,
1950 struct extent_buffer *leaf;
1951 struct btrfs_ioctl_search_header sh;
1952 struct btrfs_key test;
1953 unsigned long item_off;
1954 unsigned long item_len;
1960 leaf = path->nodes[0];
1961 slot = path->slots[0];
1962 nritems = btrfs_header_nritems(leaf);
1964 if (btrfs_header_generation(leaf) > sk->max_transid) {
1968 found_transid = btrfs_header_generation(leaf);
1970 for (i = slot; i < nritems; i++) {
1971 item_off = btrfs_item_ptr_offset(leaf, i);
1972 item_len = btrfs_item_size_nr(leaf, i);
1974 btrfs_item_key_to_cpu(leaf, key, i);
1975 if (!key_in_sk(key, sk))
1978 if (sizeof(sh) + item_len > *buf_size) {
1985 * return one empty item back for v1, which does not handle -EOVERFLOW.
1989 *buf_size = sizeof(sh) + item_len;
1994 if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
1999 sh.objectid = key->objectid;
2000 sh.offset = key->offset;
2001 sh.type = key->type;
2003 sh.transid = found_transid;
2005 /* copy search result header */
2006 if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
2011 *sk_offset += sizeof(sh);
2014 char __user *up = ubuf + *sk_offset;
2016 if (read_extent_buffer_to_user(leaf, up,
2017 item_off, item_len)) {
2022 *sk_offset += item_len;
2026 if (ret) /* -EOVERFLOW from above */
2029 if (*num_found >= sk->nr_items) {
2036 test.objectid = sk->max_objectid;
2037 test.type = sk->max_type;
2038 test.offset = sk->max_offset;
2039 if (btrfs_comp_cpu_keys(key, &test) >= 0)
2041 else if (key->offset < (u64)-1)
2043 else if (key->type < (u8)-1) {
2046 } else if (key->objectid < (u64)-1) {
2054 * 0: all items from this leaf copied, continue with next
2055 * 1: * more items can be copied, but the unused buffer is too small
2056 * * all items were found
2057 * Either way, it stops the loop which iterates to the next leaf
2059 * -EOVERFLOW: item was too large for the buffer
2060 * -EFAULT: could not copy extent buffer back to userspace
2065 static noinline int search_ioctl(struct inode *inode,
2066 struct btrfs_ioctl_search_key *sk,
2070 struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
2071 struct btrfs_root *root;
2072 struct btrfs_key key;
2073 struct btrfs_path *path;
2076 unsigned long sk_offset = 0;
2078 if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
2079 *buf_size = sizeof(struct btrfs_ioctl_search_header);
2083 path = btrfs_alloc_path();
2087 if (sk->tree_id == 0) {
2088 /* search the root of the inode that was passed */
2089 root = BTRFS_I(inode)->root;
2091 key.objectid = sk->tree_id;
2092 key.type = BTRFS_ROOT_ITEM_KEY;
2093 key.offset = (u64)-1;
2094 root = btrfs_read_fs_root_no_name(info, &key);
2096 btrfs_free_path(path);
2101 key.objectid = sk->min_objectid;
2102 key.type = sk->min_type;
2103 key.offset = sk->min_offset;
2106 ret = btrfs_search_forward(root, &key, path, sk->min_transid);
2112 ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
2113 &sk_offset, &num_found);
2114 btrfs_release_path(path);
2122 sk->nr_items = num_found;
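/*
 * Report how many items were copied; copy_to_sk() advanced the search
 * key after each leaf so the loop above resumed where the previous leaf
 * left off, until the buffer filled up or the key range was exhausted.
 */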
2123 btrfs_free_path(path);
2127 static noinline int btrfs_ioctl_tree_search(struct file *file,
2130 struct btrfs_ioctl_search_args __user *uargs;
2131 struct btrfs_ioctl_search_key sk;
2132 struct inode *inode;
2136 if (!capable(CAP_SYS_ADMIN))
2139 uargs = (struct btrfs_ioctl_search_args __user *)argp;
2141 if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
2144 buf_size = sizeof(uargs->buf);
2146 inode = file_inode(file);
2147 ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
2150 * In the original implementation an overflow is handled by returning a
2151 * search header with a len of zero, so reset ret.
2153 if (ret == -EOVERFLOW)
2156 if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
2161 static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
2164 struct btrfs_ioctl_search_args_v2 __user *uarg;
2165 struct btrfs_ioctl_search_args_v2 args;
2166 struct inode *inode;
2169 const size_t buf_limit = SZ_16M;
2171 if (!capable(CAP_SYS_ADMIN))
2174 /* copy search header and buffer size */
2175 uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
2176 if (copy_from_user(&args, uarg, sizeof(args)))
2179 buf_size = args.buf_size;
2181 /* limit result size to 16MB */
2182 if (buf_size > buf_limit)
2183 buf_size = buf_limit;
2185 inode = file_inode(file);
2186 ret = search_ioctl(inode, &args.key, &buf_size,
2187 (char *)(&uarg->buf[0]));
2188 if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
2190 else if (ret == -EOVERFLOW &&
2191 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
2198 * Search INODE_REFs to identify the path name of the 'dirid' directory
2199 * in the 'tree_id' tree, and store that path name in 'name'.
2201 static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2202 u64 tree_id, u64 dirid, char *name)
2204 struct btrfs_root *root;
2205 struct btrfs_key key;
2211 struct btrfs_inode_ref *iref;
2212 struct extent_buffer *l;
2213 struct btrfs_path *path;
2215 if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
2220 path = btrfs_alloc_path();
2224 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
2226 key.objectid = tree_id;
2227 key.type = BTRFS_ROOT_ITEM_KEY;
2228 key.offset = (u64)-1;
2229 root = btrfs_read_fs_root_no_name(info, &key);
2231 btrfs_err(info, "could not find root %llu", tree_id);
2236 key.objectid = dirid;
2237 key.type = BTRFS_INODE_REF_KEY;
2238 key.offset = (u64)-1;
2241 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2245 ret = btrfs_previous_item(root, path, dirid,
2246 BTRFS_INODE_REF_KEY);
2256 slot = path->slots[0];
2257 btrfs_item_key_to_cpu(l, &key, slot);
2259 iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
2260 len = btrfs_inode_ref_name_len(l, iref);
2262 total_len += len + 1;
2264 ret = -ENAMETOOLONG;
2269 read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
2271 if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
2274 btrfs_release_path(path);
2275 key.objectid = key.offset;
2276 key.offset = (u64)-1;
2277 dirid = key.objectid;
2279 memmove(name, ptr, total_len);
2280 name[total_len] = '\0';
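/*
 * The path was assembled backwards: each INODE_REF names the parent
 * directory, so components were written from the end of the buffer
 * towards the front and the finished string is moved to the start of
 * 'name' here.
 */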
2283 btrfs_free_path(path);
2287 static noinline int btrfs_ioctl_ino_lookup(struct file *file,
2290 struct btrfs_ioctl_ino_lookup_args *args;
2291 struct inode *inode;
2294 args = memdup_user(argp, sizeof(*args));
2296 return PTR_ERR(args);
2298 inode = file_inode(file);
2301 * Unprivileged query to obtain the containing subvolume root id. The
2302 * path is reset so it's consistent with btrfs_search_path_in_tree.
2304 if (args->treeid == 0)
2305 args->treeid = BTRFS_I(inode)->root->root_key.objectid;
2307 if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
2312 if (!capable(CAP_SYS_ADMIN)) {
2317 ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
2318 args->treeid, args->objectid,
2322 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2329 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2332 struct dentry *parent = file->f_path.dentry;
2333 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2334 struct dentry *dentry;
2335 struct inode *dir = d_inode(parent);
2336 struct inode *inode;
2337 struct btrfs_root *root = BTRFS_I(dir)->root;
2338 struct btrfs_root *dest = NULL;
2339 struct btrfs_ioctl_vol_args *vol_args;
2340 struct btrfs_trans_handle *trans;
2341 struct btrfs_block_rsv block_rsv;
2343 u64 qgroup_reserved;
2348 if (!S_ISDIR(dir->i_mode))
2351 vol_args = memdup_user(arg, sizeof(*vol_args));
2352 if (IS_ERR(vol_args))
2353 return PTR_ERR(vol_args);
2355 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2356 namelen = strlen(vol_args->name);
2357 if (strchr(vol_args->name, '/') ||
2358 strncmp(vol_args->name, "..", namelen) == 0) {
2363 err = mnt_want_write_file(file);
2368 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2370 goto out_drop_write;
2371 dentry = lookup_one_len(vol_args->name, parent, namelen);
2372 if (IS_ERR(dentry)) {
2373 err = PTR_ERR(dentry);
2374 goto out_unlock_dir;
2377 if (d_really_is_negative(dentry)) {
2382 inode = d_inode(dentry);
2383 dest = BTRFS_I(inode)->root;
2384 if (!capable(CAP_SYS_ADMIN)) {
2386 * Regular user. Only allow this with a special mount
2387 * option, when the user has write+exec access to the
2388 * subvol root, and when rmdir(2) would have been allowed.
2391 * Note that this is _not_ a check that the subvol is
2392 * empty or doesn't contain data that we wouldn't
2393 * otherwise be able to delete.
2395 * Users who want to delete empty subvols should try rmdir(2).
2399 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
2403 * Do not allow deletion if the parent dir is the same
2404 * as the dir to be deleted. That means the ioctl
2405 * must be called on the dentry referencing the root
2406 * of the subvol, not a random directory contained within it.
2413 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2418 /* check if subvolume may be deleted by a user */
2419 err = btrfs_may_delete(dir, dentry, 1);
2423 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
2431 * Don't allow deleting a subvolume with send in progress. This is
2432 * inside the i_mutex so the error handling that has to drop the bit
2433 * again is not run concurrently.
2435 spin_lock(&dest->root_item_lock);
2436 root_flags = btrfs_root_flags(&dest->root_item);
2437 if (dest->send_in_progress == 0) {
2438 btrfs_set_root_flags(&dest->root_item,
2439 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
2440 spin_unlock(&dest->root_item_lock);
2442 spin_unlock(&dest->root_item_lock);
2444 "Attempt to delete subvolume %llu during send",
2445 dest->root_key.objectid);
2447 goto out_unlock_inode;
2450 down_write(&fs_info->subvol_sem);
2452 err = may_destroy_subvol(dest);
2456 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
2458 * One for dir inode, two for dir entries, two for root ref/backref.
2461 err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
2462 5, &qgroup_reserved, true);
2466 trans = btrfs_start_transaction(root, 0);
2467 if (IS_ERR(trans)) {
2468 err = PTR_ERR(trans);
2471 trans->block_rsv = &block_rsv;
2472 trans->bytes_reserved = block_rsv.size;
2474 btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
2476 ret = btrfs_unlink_subvol(trans, root, dir,
2477 dest->root_key.objectid,
2478 dentry->d_name.name,
2479 dentry->d_name.len);
2482 btrfs_abort_transaction(trans, ret);
2486 btrfs_record_root_in_trans(trans, dest);
2488 memset(&dest->root_item.drop_progress, 0,
2489 sizeof(dest->root_item.drop_progress));
2490 dest->root_item.drop_level = 0;
2491 btrfs_set_root_refs(&dest->root_item, 0);
2493 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
2494 ret = btrfs_insert_orphan_item(trans,
2496 dest->root_key.objectid);
2498 btrfs_abort_transaction(trans, ret);
2504 ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.uuid,
2505 BTRFS_UUID_KEY_SUBVOL,
2506 dest->root_key.objectid);
2507 if (ret && ret != -ENOENT) {
2508 btrfs_abort_transaction(trans, ret);
2512 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
2513 ret = btrfs_uuid_tree_rem(trans, fs_info,
2514 dest->root_item.received_uuid,
2515 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
2516 dest->root_key.objectid);
2517 if (ret && ret != -ENOENT) {
2518 btrfs_abort_transaction(trans, ret);
2525 trans->block_rsv = NULL;
2526 trans->bytes_reserved = 0;
2527 ret = btrfs_end_transaction(trans);
2530 inode->i_flags |= S_DEAD;
2532 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
2534 up_write(&fs_info->subvol_sem);
2536 spin_lock(&dest->root_item_lock);
2537 root_flags = btrfs_root_flags(&dest->root_item);
2538 btrfs_set_root_flags(&dest->root_item,
2539 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
2540 spin_unlock(&dest->root_item_lock);
2543 inode_unlock(inode);
2545 d_invalidate(dentry);
2546 btrfs_invalidate_inodes(dest);
2548 ASSERT(dest->send_in_progress == 0);
2551 if (dest->ino_cache_inode) {
2552 iput(dest->ino_cache_inode);
2553 dest->ino_cache_inode = NULL;
2561 mnt_drop_write_file(file);
2567 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2569 struct inode *inode = file_inode(file);
2570 struct btrfs_root *root = BTRFS_I(inode)->root;
2571 struct btrfs_ioctl_defrag_range_args *range;
2574 ret = mnt_want_write_file(file);
2578 if (btrfs_root_readonly(root)) {
2583 switch (inode->i_mode & S_IFMT) {
2585 if (!capable(CAP_SYS_ADMIN)) {
2589 ret = btrfs_defrag_root(root);
2592 if (!(file->f_mode & FMODE_WRITE)) {
2597 range = kzalloc(sizeof(*range), GFP_KERNEL);
2604 if (copy_from_user(range, argp,
2610 /* compression requires us to start the IO */
2611 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
2612 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
2613 range->extent_thresh = (u32)-1;
2616 /* the rest are all set to zero by kzalloc */
2617 range->len = (u64)-1;
2619 ret = btrfs_defrag_file(file_inode(file), file,
2629 mnt_drop_write_file(file);
2633 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
2635 struct btrfs_ioctl_vol_args *vol_args;
2638 if (!capable(CAP_SYS_ADMIN))
2641 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
2642 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2644 mutex_lock(&fs_info->volume_mutex);
2645 vol_args = memdup_user(arg, sizeof(*vol_args));
2646 if (IS_ERR(vol_args)) {
2647 ret = PTR_ERR(vol_args);
2651 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2652 ret = btrfs_init_new_device(fs_info, vol_args->name);
2655 btrfs_info(fs_info, "disk added %s", vol_args->name);
2659 mutex_unlock(&fs_info->volume_mutex);
2660 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
2664 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
2666 struct inode *inode = file_inode(file);
2667 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2668 struct btrfs_ioctl_vol_args_v2 *vol_args;
2671 if (!capable(CAP_SYS_ADMIN))
2674 ret = mnt_want_write_file(file);
2678 vol_args = memdup_user(arg, sizeof(*vol_args));
2679 if (IS_ERR(vol_args)) {
2680 ret = PTR_ERR(vol_args);
2684 /* Check for compatibility: reject unknown flags */
2685 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
2688 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
2689 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2693 mutex_lock(&fs_info->volume_mutex);
2694 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
2695 ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
2697 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
2698 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
2700 mutex_unlock(&fs_info->volume_mutex);
2701 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
2704 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
2705 btrfs_info(fs_info, "device deleted: id %llu",
2708 btrfs_info(fs_info, "device deleted: %s",
2714 mnt_drop_write_file(file);
2718 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
2720 struct inode *inode = file_inode(file);
2721 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2722 struct btrfs_ioctl_vol_args *vol_args;
2725 if (!capable(CAP_SYS_ADMIN))
2728 ret = mnt_want_write_file(file);
2732 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
2733 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2734 goto out_drop_write;
2737 vol_args = memdup_user(arg, sizeof(*vol_args));
2738 if (IS_ERR(vol_args)) {
2739 ret = PTR_ERR(vol_args);
2743 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2744 mutex_lock(&fs_info->volume_mutex);
2745 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
2746 mutex_unlock(&fs_info->volume_mutex);
2749 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
2752 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
2754 mnt_drop_write_file(file);
2759 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
2762 struct btrfs_ioctl_fs_info_args *fi_args;
2763 struct btrfs_device *device;
2764 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2767 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2771 mutex_lock(&fs_devices->device_list_mutex);
2772 fi_args->num_devices = fs_devices->num_devices;
2773 memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
2775 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2776 if (device->devid > fi_args->max_id)
2777 fi_args->max_id = device->devid;
2779 mutex_unlock(&fs_devices->device_list_mutex);
2781 fi_args->nodesize = fs_info->nodesize;
2782 fi_args->sectorsize = fs_info->sectorsize;
2783 fi_args->clone_alignment = fs_info->sectorsize;
2785 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
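/*
 * Report information about a single device, looked up by devid and
 * optionally by uuid.
 */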
2792 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
2795 struct btrfs_ioctl_dev_info_args *di_args;
2796 struct btrfs_device *dev;
2797 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2799 char *s_uuid = NULL;
2801 di_args = memdup_user(arg, sizeof(*di_args));
2802 if (IS_ERR(di_args))
2803 return PTR_ERR(di_args);
2805 if (!btrfs_is_empty_uuid(di_args->uuid))
2806 s_uuid = di_args->uuid;
2808 mutex_lock(&fs_devices->device_list_mutex);
2809 dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
2816 di_args->devid = dev->devid;
2817 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
2818 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
2819 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2821 struct rcu_string *name;
2824 name = rcu_dereference(dev->name);
2825 strncpy(di_args->path, name->str, sizeof(di_args->path));
2827 di_args->path[sizeof(di_args->path) - 1] = 0;
2829 di_args->path[0] = '\0';
2833 mutex_unlock(&fs_devices->device_list_mutex);
2834 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
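/*
 * Grab a locked, up to date page from the inode's page cache for the
 * clone/dedupe data comparison; returns an ERR_PTR on failure.
 */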
2841 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2845 page = grab_cache_page(inode->i_mapping, index);
2847 return ERR_PTR(-ENOMEM);
2849 if (!PageUptodate(page)) {
2852 ret = btrfs_readpage(NULL, page);
2854 return ERR_PTR(ret);
2856 if (!PageUptodate(page)) {
2859 return ERR_PTR(-EIO);
2861 if (page->mapping != inode->i_mapping) {
2864 return ERR_PTR(-EAGAIN);
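/* Collect num_pages consecutive locked pages of the range starting at off. */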
2871 static int gather_extent_pages(struct inode *inode, struct page **pages,
2872 int num_pages, u64 off)
2875 pgoff_t index = off >> PAGE_SHIFT;
2877 for (i = 0; i < num_pages; i++) {
2879 pages[i] = extent_same_get_page(inode, index + i);
2880 if (IS_ERR(pages[i])) {
2881 int err = PTR_ERR(pages[i]);
2892 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2893 bool retry_range_locking)
2896 * Do any pending delalloc/csum calculations on inode, one way or
2897 * another, and lock file content.
2898 * The locking order is:
2901 * 2) range in the inode's io tree
2904 struct btrfs_ordered_extent *ordered;
2905 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2906 ordered = btrfs_lookup_first_ordered_extent(inode,
2909 ordered->file_offset + ordered->len <= off ||
2910 ordered->file_offset >= off + len) &&
2911 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
2912 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
2914 btrfs_put_ordered_extent(ordered);
2917 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2919 btrfs_put_ordered_extent(ordered);
2920 if (!retry_range_locking)
2922 btrfs_wait_ordered_range(inode, off, len);
2927 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
2929 inode_unlock(inode1);
2930 inode_unlock(inode2);
2933 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
2935 if (inode1 < inode2)
2936 swap(inode1, inode2);
2938 inode_lock_nested(inode1, I_MUTEX_PARENT);
2939 inode_lock_nested(inode2, I_MUTEX_CHILD);
2942 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2943 struct inode *inode2, u64 loff2, u64 len)
2945 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2946 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2949 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2950 struct inode *inode2, u64 loff2, u64 len,
2951 bool retry_range_locking)
2955 if (inode1 < inode2) {
2956 swap(inode1, inode2);
2959 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
2962 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
2964 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
2971 struct page **src_pages;
2972 struct page **dst_pages;
2975 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2980 for (i = 0; i < cmp->num_pages; i++) {
2981 pg = cmp->src_pages[i];
2986 pg = cmp->dst_pages[i];
2992 kfree(cmp->src_pages);
2993 kfree(cmp->dst_pages);
2996 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2997 struct inode *dst, u64 dst_loff,
2998 u64 len, struct cmp_pages *cmp)
3001 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3002 struct page **src_pgarr, **dst_pgarr;
3005 * We must gather up all the pages before we initiate our
3006 * extent locking. We use an array for the page pointers. Size
3007 * of the array is bounded by len, which is in turn bounded by
3008 * BTRFS_MAX_DEDUPE_LEN.
3010 src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3011 dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3012 if (!src_pgarr || !dst_pgarr) {
3017 cmp->num_pages = num_pages;
3018 cmp->src_pages = src_pgarr;
3019 cmp->dst_pages = dst_pgarr;
3022 * If deduping ranges in the same inode, locking rules make it mandatory
3023 * to always lock pages in ascending order to avoid deadlocks with
3024 * concurrent tasks (such as starting writeback/delalloc).
3026 if (src == dst && dst_loff < loff) {
3027 swap(src_pgarr, dst_pgarr);
3028 swap(loff, dst_loff);
3031 ret = gather_extent_pages(src, src_pgarr, cmp->num_pages, loff);
3035 ret = gather_extent_pages(dst, dst_pgarr, cmp->num_pages, dst_loff);
3039 btrfs_cmp_data_free(cmp);
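/*
 * Compare the first len bytes of the gathered source and destination
 * pages, one page at a time.
 */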
3043 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
3047 struct page *src_page, *dst_page;
3048 unsigned int cmp_len = PAGE_SIZE;
3049 void *addr, *dst_addr;
3053 if (len < PAGE_SIZE)
3056 BUG_ON(i >= cmp->num_pages);
3058 src_page = cmp->src_pages[i];
3059 dst_page = cmp->dst_pages[i];
3060 ASSERT(PageLocked(src_page));
3061 ASSERT(PageLocked(dst_page));
3063 addr = kmap_atomic(src_page);
3064 dst_addr = kmap_atomic(dst_page);
3066 flush_dcache_page(src_page);
3067 flush_dcache_page(dst_page);
3069 if (memcmp(addr, dst_addr, cmp_len))
3072 kunmap_atomic(addr);
3073 kunmap_atomic(dst_addr);
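/*
 * Validate a dedupe range: it must lie within i_size and be block
 * aligned; a length that ends at EOF is extended to the block boundary.
 */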
3085 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3089 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3091 if (off + olen > inode->i_size || off + olen < off)
3094 /* if we extend to eof, continue to block boundary */
3095 if (off + len == inode->i_size)
3096 *plen = len = ALIGN(inode->i_size, bs) - off;
3098 /* Check that we are block aligned - btrfs_clone() requires this */
3099 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
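/*
 * Core of the dedupe ioctl: lock both file ranges, compare their data
 * and, if identical, clone the source extents into the destination.
 */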
3105 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3106 struct inode *dst, u64 dst_loff)
3110 struct cmp_pages cmp;
3111 bool same_inode = (src == dst);
3112 u64 same_lock_start = 0;
3113 u64 same_lock_len = 0;
3121 btrfs_double_inode_lock(src, dst);
3123 ret = extent_same_check_offsets(src, loff, &len, olen);
3127 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3133 * Single inode case wants the same checks, except we
3134 * don't want our length pushed out past i_size as
3135 * comparing that data range makes no sense.
3137 * extent_same_check_offsets() will do this for an
3138 * unaligned length at i_size, so catch it here and
3139 * reject the request.
3141 * This effectively means we require aligned extents
3142 * for the single-inode case, whereas the other cases
3143 * allow an unaligned length so long as it ends at
3151 /* Check for overlapping ranges */
3152 if (dst_loff + len > loff && dst_loff < loff + len) {
3157 same_lock_start = min_t(u64, loff, dst_loff);
3158 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3161 /* don't make the dst file partly checksummed */
3162 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3163 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3169 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3174 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3177 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3180 * If one of the inodes has dirty pages in the respective range or
3181 * ordered extents, we need to flush delalloc and wait for all ordered
3182 * extents in the range. We must unlock the pages and the ranges in the
3183 * io trees to avoid deadlocks when flushing delalloc (requires locking
3184 * pages) and when waiting for ordered extents to complete (they require
3187 if (ret == -EAGAIN) {
3189 * Ranges in the io trees are already unlocked. Now unlock all
3190 * pages before waiting for all IO to complete.
3192 btrfs_cmp_data_free(&cmp);
3194 btrfs_wait_ordered_range(src, same_lock_start,
3197 btrfs_wait_ordered_range(src, loff, len);
3198 btrfs_wait_ordered_range(dst, dst_loff, len);
3204 /* ranges in the io trees already unlocked */
3205 btrfs_cmp_data_free(&cmp);
3209 /* pass original length for comparison so we stay within i_size */
3210 ret = btrfs_cmp_data(olen, &cmp);
3212 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3215 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3216 same_lock_start + same_lock_len - 1);
3218 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3220 btrfs_cmp_data_free(&cmp);
3225 btrfs_double_inode_unlock(src, dst);
3230 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3232 ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
3233 struct file *dst_file, u64 dst_loff)
3235 struct inode *src = file_inode(src_file);
3236 struct inode *dst = file_inode(dst_file);
3237 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3240 if (olen > BTRFS_MAX_DEDUPE_LEN)
3241 olen = BTRFS_MAX_DEDUPE_LEN;
3243 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3245 * Btrfs does not support blocksize < page_size. As a
3246 * result, btrfs_cmp_data() won't correctly handle
3247 * this situation without an update.
3252 res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
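/*
 * Update the target inode after extents have been cloned into it: bump
 * the iversion, timestamps and i_size as needed, then end the transaction.
 */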
3258 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3259 struct inode *inode,
3265 struct btrfs_root *root = BTRFS_I(inode)->root;
3268 inode_inc_iversion(inode);
3269 if (!no_time_update)
3270 inode->i_mtime = inode->i_ctime = current_time(inode);
3272 * We round up to the block size at eof when determining which
3273 * extents to clone above, but shouldn't round up the file size.
3275 if (endoff > destoff + olen)
3276 endoff = destoff + olen;
3277 if (endoff > inode->i_size)
3278 btrfs_i_size_write(BTRFS_I(inode), endoff);
3280 ret = btrfs_update_inode(trans, root, inode);
3282 btrfs_abort_transaction(trans, ret);
3283 btrfs_end_transaction(trans);
3286 ret = btrfs_end_transaction(trans);
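/*
 * Insert an extent map for the cloned file extent (or for a hole when
 * path is NULL); on failure force a full fsync of the inode instead.
 */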
3291 static void clone_update_extent_map(struct btrfs_inode *inode,
3292 const struct btrfs_trans_handle *trans,
3293 const struct btrfs_path *path,
3294 const u64 hole_offset,
3297 struct extent_map_tree *em_tree = &inode->extent_tree;
3298 struct extent_map *em;
3301 em = alloc_extent_map();
3303 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3308 struct btrfs_file_extent_item *fi;
3310 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3311 struct btrfs_file_extent_item);
3312 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3313 em->generation = -1;
3314 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3315 BTRFS_FILE_EXTENT_INLINE)
3316 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3317 &inode->runtime_flags);
3319 em->start = hole_offset;
3321 em->ram_bytes = em->len;
3322 em->orig_start = hole_offset;
3323 em->block_start = EXTENT_MAP_HOLE;
3325 em->orig_block_len = 0;
3326 em->compress_type = BTRFS_COMPRESS_NONE;
3327 em->generation = trans->transid;
3331 write_lock(&em_tree->lock);
3332 ret = add_extent_mapping(em_tree, em, 1);
3333 write_unlock(&em_tree->lock);
3334 if (ret != -EEXIST) {
3335 free_extent_map(em);
3338 btrfs_drop_extent_cache(inode, em->start,
3339 em->start + em->len - 1, 0);
3343 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3347 * Make sure we do not end up inserting an inline extent into a file that
3348 * already has other (non-inline) extents. If a file has an inline extent it
3349 * cannot have any other extents and the (single) inline extent must start at
3350 * file offset 0. Failing to respect these rules will lead to file corruption,
3351 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
3353 * We can have extents that have been already written to disk or we can have
3354 * dirty ranges still in delalloc, in which case the extent maps and items are
3355 * created only when we run delalloc, and the delalloc ranges might fall outside
3356 * the range we are currently locking in the inode's io tree. So we check the
3357 * inode's i_size because of that (i_size updates are done while holding the
3358 * i_mutex, which we are holding here).
3359 * We also check to see if the inode has a size not greater than "datal" but has
3360 * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
3361 * protected against such concurrent fallocate calls by the i_mutex).
3363 * If the file has no extents but a size greater than datal, do not allow the
3364 * copy because we would need to turn the inline extent into a non-inline one (even
3365 * with NO_HOLES enabled). If we find our destination inode only has one inline
3366 * extent, just overwrite it with the source inline extent if its size is less
3367 * than the source extent's size, or we could copy the source inline extent's
3368 * data into the destination inode's inline extent if the latter is greater than
3371 static int clone_copy_inline_extent(struct inode *dst,
3372 struct btrfs_trans_handle *trans,
3373 struct btrfs_path *path,
3374 struct btrfs_key *new_key,
3375 const u64 drop_start,
3381 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3382 struct btrfs_root *root = BTRFS_I(dst)->root;
3383 const u64 aligned_end = ALIGN(new_key->offset + datal,
3384 fs_info->sectorsize);
3386 struct btrfs_key key;
3388 if (new_key->offset > 0)
3391 key.objectid = btrfs_ino(BTRFS_I(dst));
3392 key.type = BTRFS_EXTENT_DATA_KEY;
3394 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3397 } else if (ret > 0) {
3398 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3399 ret = btrfs_next_leaf(root, path);
3403 goto copy_inline_extent;
3405 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3406 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3407 key.type == BTRFS_EXTENT_DATA_KEY) {
3408 ASSERT(key.offset > 0);
3411 } else if (i_size_read(dst) <= datal) {
3412 struct btrfs_file_extent_item *ei;
3416 * If the file size is <= datal, make sure there are no other
3417 * extents following (can happen due to a fallocate call with
3418 * the flag FALLOC_FL_KEEP_SIZE).
3420 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3421 struct btrfs_file_extent_item);
3423 * If it's an inline extent, it cannot have other extents
3426 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3427 BTRFS_FILE_EXTENT_INLINE)
3428 goto copy_inline_extent;
3430 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3431 if (ext_len > aligned_end)
3434 ret = btrfs_next_item(root, path);
3437 } else if (ret == 0) {
3438 btrfs_item_key_to_cpu(path->nodes[0], &key,
3440 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3441 key.type == BTRFS_EXTENT_DATA_KEY)
3448 * We have no extent items, or we have an extent at offset 0 which may
3449 * or may not be inlined. All these cases are dealt with in the same way.
3451 if (i_size_read(dst) > datal) {
3453 * If the destination inode has an inline extent...
3454 * This would require copying the data from the source inline
3455 * extent into the beginning of the destination's inline extent.
3456 * But this is really complex: both extents, or just one of them,
3457 * can be compressed, which would require decompressing and
3458 * re-compressing data (which could increase the new compressed
3459 * size, not allowing the compressed data to fit anymore in an
3461 * So just don't support this case for now (it should be rare,
3462 * we are not really saving space when cloning inline extents).
3467 btrfs_release_path(path);
3468 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3471 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3476 const u32 start = btrfs_file_extent_calc_inline_size(0);
3478 memmove(inline_data + start, inline_data + start + skip, datal);
3481 write_extent_buffer(path->nodes[0], inline_data,
3482 btrfs_item_ptr_offset(path->nodes[0],
3485 inode_add_bytes(dst, datal);
3491 * btrfs_clone() - clone a range from one inode's file to another
3493 * @src: Inode to clone from
3494 * @inode: Inode to clone to
3495 * @off: Offset within source to start clone from
3496 * @olen: Original length, passed by user, of range to clone
3497 * @olen_aligned: Block-aligned value of olen
3498 * @destoff: Offset within @inode to start clone
3499 * @no_time_update: Whether to update mtime/ctime on the target inode
3501 static int btrfs_clone(struct inode *src, struct inode *inode,
3502 const u64 off, const u64 olen, const u64 olen_aligned,
3503 const u64 destoff, int no_time_update)
3505 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3506 struct btrfs_root *root = BTRFS_I(inode)->root;
3507 struct btrfs_path *path = NULL;
3508 struct extent_buffer *leaf;
3509 struct btrfs_trans_handle *trans;
3511 struct btrfs_key key;
3515 const u64 len = olen_aligned;
3516 u64 last_dest_end = destoff;
3519 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3523 path = btrfs_alloc_path();
3529 path->reada = READA_FORWARD;
3531 key.objectid = btrfs_ino(BTRFS_I(src));
3532 key.type = BTRFS_EXTENT_DATA_KEY;
3536 u64 next_key_min_offset = key.offset + 1;
3539 * note the key will change type as we walk through the
3542 path->leave_spinning = 1;
3543 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3548 * On the first search, if no extent item that starts at offset off was
3549 * found but the previous item is an extent item, it might
3550 * overlap our target range, so process it.
3552 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3553 btrfs_item_key_to_cpu(path->nodes[0], &key,
3554 path->slots[0] - 1);
3555 if (key.type == BTRFS_EXTENT_DATA_KEY)
3559 nritems = btrfs_header_nritems(path->nodes[0]);
3561 if (path->slots[0] >= nritems) {
3562 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3567 nritems = btrfs_header_nritems(path->nodes[0]);
3569 leaf = path->nodes[0];
3570 slot = path->slots[0];
3572 btrfs_item_key_to_cpu(leaf, &key, slot);
3573 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3574 key.objectid != btrfs_ino(BTRFS_I(src)))
3577 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3578 struct btrfs_file_extent_item *extent;
3581 struct btrfs_key new_key;
3582 u64 disko = 0, diskl = 0;
3583 u64 datao = 0, datal = 0;
3587 extent = btrfs_item_ptr(leaf, slot,
3588 struct btrfs_file_extent_item);
3589 comp = btrfs_file_extent_compression(leaf, extent);
3590 type = btrfs_file_extent_type(leaf, extent);
3591 if (type == BTRFS_FILE_EXTENT_REG ||
3592 type == BTRFS_FILE_EXTENT_PREALLOC) {
3593 disko = btrfs_file_extent_disk_bytenr(leaf,
3595 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3597 datao = btrfs_file_extent_offset(leaf, extent);
3598 datal = btrfs_file_extent_num_bytes(leaf,
3600 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3601 /* take upper bound, may be compressed */
3602 datal = btrfs_file_extent_ram_bytes(leaf,
3607 * The first search might have left us at an extent
3608 * item that ends before our target range's start, which can
3609 * happen if we have holes and the NO_HOLES feature enabled.
3611 if (key.offset + datal <= off) {
3614 } else if (key.offset >= off + len) {
3617 next_key_min_offset = key.offset + datal;
3618 size = btrfs_item_size_nr(leaf, slot);
3619 read_extent_buffer(leaf, buf,
3620 btrfs_item_ptr_offset(leaf, slot),
3623 btrfs_release_path(path);
3624 path->leave_spinning = 0;
3626 memcpy(&new_key, &key, sizeof(new_key));
3627 new_key.objectid = btrfs_ino(BTRFS_I(inode));
3628 if (off <= key.offset)
3629 new_key.offset = key.offset + destoff - off;
3631 new_key.offset = destoff;
3634 * Deal with a hole that doesn't have an extent item
3635 * that represents it (NO_HOLES feature enabled).
3636 * This hole is either in the middle of the cloning
3637 * range or at the beginning (fully overlaps it or
3638 * partially overlaps it).
3640 if (new_key.offset != last_dest_end)
3641 drop_start = last_dest_end;
3643 drop_start = new_key.offset;
3646 * 1 - adjusting old extent (we may have to split it)
3647 * 1 - add new extent
3650 trans = btrfs_start_transaction(root, 3);
3651 if (IS_ERR(trans)) {
3652 ret = PTR_ERR(trans);
3656 if (type == BTRFS_FILE_EXTENT_REG ||
3657 type == BTRFS_FILE_EXTENT_PREALLOC) {
3659 * a | --- range to clone ---| b
3660 * | ------------- extent ------------- |
3663 /* subtract range b */
3664 if (key.offset + datal > off + len)
3665 datal = off + len - key.offset;
3667 /* subtract range a */
3668 if (off > key.offset) {
3669 datao += off - key.offset;
3670 datal -= off - key.offset;
3673 ret = btrfs_drop_extents(trans, root, inode,
3675 new_key.offset + datal,
3678 if (ret != -EOPNOTSUPP)
3679 btrfs_abort_transaction(trans,
3681 btrfs_end_transaction(trans);
3685 ret = btrfs_insert_empty_item(trans, root, path,
3688 btrfs_abort_transaction(trans, ret);
3689 btrfs_end_transaction(trans);
3693 leaf = path->nodes[0];
3694 slot = path->slots[0];
3695 write_extent_buffer(leaf, buf,
3696 btrfs_item_ptr_offset(leaf, slot),
3699 extent = btrfs_item_ptr(leaf, slot,
3700 struct btrfs_file_extent_item);
3702 /* disko == 0 means it's a hole */
3706 btrfs_set_file_extent_offset(leaf, extent,
3708 btrfs_set_file_extent_num_bytes(leaf, extent,
3712 inode_add_bytes(inode, datal);
3713 ret = btrfs_inc_extent_ref(trans,
3716 root->root_key.objectid,
3717 btrfs_ino(BTRFS_I(inode)),
3718 new_key.offset - datao);
3720 btrfs_abort_transaction(trans,
3722 btrfs_end_transaction(trans);
3727 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3731 if (off > key.offset) {
3732 skip = off - key.offset;
3733 new_key.offset += skip;
3736 if (key.offset + datal > off + len)
3737 trim = key.offset + datal - (off + len);
3739 if (comp && (skip || trim)) {
3741 btrfs_end_transaction(trans);
3744 size -= skip + trim;
3745 datal -= skip + trim;
3747 ret = clone_copy_inline_extent(inode,
3754 if (ret != -EOPNOTSUPP)
3755 btrfs_abort_transaction(trans,
3757 btrfs_end_transaction(trans);
3760 leaf = path->nodes[0];
3761 slot = path->slots[0];
3764 /* If we have an implicit hole (NO_HOLES feature). */
3765 if (drop_start < new_key.offset)
3766 clone_update_extent_map(BTRFS_I(inode), trans,
3768 new_key.offset - drop_start);
3770 clone_update_extent_map(BTRFS_I(inode), trans,
3773 btrfs_mark_buffer_dirty(leaf);
3774 btrfs_release_path(path);
3776 last_dest_end = ALIGN(new_key.offset + datal,
3777 fs_info->sectorsize);
3778 ret = clone_finish_inode_update(trans, inode,
3784 if (new_key.offset + datal >= destoff + len)
3787 btrfs_release_path(path);
3788 key.offset = next_key_min_offset;
3790 if (fatal_signal_pending(current)) {
3797 if (last_dest_end < destoff + len) {
3799 * We have an implicit hole (NO_HOLES feature is enabled) that
3800 * fully or partially overlaps our cloning range at its end.
3802 btrfs_release_path(path);
3805 * 1 - remove extent(s)
3808 trans = btrfs_start_transaction(root, 2);
3809 if (IS_ERR(trans)) {
3810 ret = PTR_ERR(trans);
3813 ret = btrfs_drop_extents(trans, root, inode,
3814 last_dest_end, destoff + len, 1);
3816 if (ret != -EOPNOTSUPP)
3817 btrfs_abort_transaction(trans, ret);
3818 btrfs_end_transaction(trans);
3821 clone_update_extent_map(BTRFS_I(inode), trans, NULL,
3823 destoff + len - last_dest_end);
3824 ret = clone_finish_inode_update(trans, inode, destoff + len,
3825 destoff, olen, no_time_update);
3829 btrfs_free_path(path);
3834 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3835 u64 off, u64 olen, u64 destoff)
3837 struct inode *inode = file_inode(file);
3838 struct inode *src = file_inode(file_src);
3839 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3840 struct btrfs_root *root = BTRFS_I(inode)->root;
3843 u64 bs = fs_info->sb->s_blocksize;
3844 int same_inode = src == inode;
3848 * - split compressed inline extents. annoying: we need to
3849 * decompress into destination's address_space (the file offset
3850 * may change, so source mapping won't do), then recompress (or
3851 * otherwise reinsert) a subrange.
3853 * - split destination inode's inline extents. The inline extents can
3854 * be either compressed or non-compressed.
3857 if (btrfs_root_readonly(root))
3860 if (file_src->f_path.mnt != file->f_path.mnt ||
3861 src->i_sb != inode->i_sb)
3864 /* don't make the dst file partly checksummed */
3865 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3866 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
3869 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
3873 btrfs_double_inode_lock(src, inode);
3878 /* determine range to clone */
3880 if (off + len > src->i_size || off + len < off)
3883 olen = len = src->i_size - off;
3884 /* if we extend to eof, continue to block boundary */
3885 if (off + len == src->i_size)
3886 len = ALIGN(src->i_size, bs) - off;
3893 /* verify the end result is block aligned */
3894 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
3895 !IS_ALIGNED(destoff, bs))
3898 /* verify whether the ranges overlap within the same file */
3900 if (destoff + len > off && destoff < off + len)
3904 if (destoff > inode->i_size) {
3905 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3911 * Lock the target range too. Right after we replace the file extent
3912 * items in the fs tree (which now point to the cloned data), we might
3913 * have a worker replace them with extent items relative to a write
3914 * operation that was issued before this clone operation (i.e. confront
3915 * with inode.c:btrfs_finish_ordered_io).
3918 u64 lock_start = min_t(u64, off, destoff);
3919 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
3921 ret = lock_extent_range(src, lock_start, lock_len, true);
3923 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
3928 /* ranges in the io trees already unlocked */
3932 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3935 u64 lock_start = min_t(u64, off, destoff);
3936 u64 lock_end = max_t(u64, off, destoff) + len - 1;
3938 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
3940 btrfs_double_extent_unlock(src, off, inode, destoff, len);
3943 * Truncate page cache pages so that future reads will see the cloned
3944 * data immediately and not the previous data.
3946 truncate_inode_pages_range(&inode->i_data,
3947 round_down(destoff, PAGE_SIZE),
3948 round_up(destoff + len, PAGE_SIZE) - 1);
3951 btrfs_double_inode_unlock(src, inode);
3957 int btrfs_clone_file_range(struct file *src_file, loff_t off,
3958 struct file *dst_file, loff_t destoff, u64 len)
3960 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
3964 * there are many ways the trans_start and trans_end ioctls can lead
3965 * to deadlocks. They should only be used by applications that
3966 * basically own the machine, and have a very in-depth understanding
3967 * of all the possible deadlocks and enospc problems.
3969 static long btrfs_ioctl_trans_start(struct file *file)
3971 struct inode *inode = file_inode(file);
3972 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3973 struct btrfs_root *root = BTRFS_I(inode)->root;
3974 struct btrfs_trans_handle *trans;
3975 struct btrfs_file_private *private;
3977 static bool warned = false;
3980 if (!capable(CAP_SYS_ADMIN))
3985 "Userspace transaction mechanism is considered "
3986 "deprecated and slated to be removed in 4.17. "
3987 "If you have a valid use case please "
3988 "speak up on the mailing list");
3994 private = file->private_data;
3995 if (private && private->trans)
3998 private = kzalloc(sizeof(struct btrfs_file_private),
4002 file->private_data = private;
4006 if (btrfs_root_readonly(root))
4009 ret = mnt_want_write_file(file);
4013 atomic_inc(&fs_info->open_ioctl_trans);
4016 trans = btrfs_start_ioctl_transaction(root);
4020 private->trans = trans;
4024 atomic_dec(&fs_info->open_ioctl_trans);
4025 mnt_drop_write_file(file);
4030 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4032 struct inode *inode = file_inode(file);
4033 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4034 struct btrfs_root *root = BTRFS_I(inode)->root;
4035 struct btrfs_root *new_root;
4036 struct btrfs_dir_item *di;
4037 struct btrfs_trans_handle *trans;
4038 struct btrfs_path *path;
4039 struct btrfs_key location;
4040 struct btrfs_disk_key disk_key;
4045 if (!capable(CAP_SYS_ADMIN))
4048 ret = mnt_want_write_file(file);
4052 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4058 objectid = BTRFS_FS_TREE_OBJECTID;
4060 location.objectid = objectid;
4061 location.type = BTRFS_ROOT_ITEM_KEY;
4062 location.offset = (u64)-1;
4064 new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4065 if (IS_ERR(new_root)) {
4066 ret = PTR_ERR(new_root);
4069 if (!is_fstree(new_root->objectid)) {
4074 path = btrfs_alloc_path();
4079 path->leave_spinning = 1;
4081 trans = btrfs_start_transaction(root, 1);
4082 if (IS_ERR(trans)) {
4083 btrfs_free_path(path);
4084 ret = PTR_ERR(trans);
4088 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4089 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4090 dir_id, "default", 7, 1);
4091 if (IS_ERR_OR_NULL(di)) {
4092 btrfs_free_path(path);
4093 btrfs_end_transaction(trans);
4095 "Umm, you don't have the default diritem, this isn't going to work");
4100 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4101 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4102 btrfs_mark_buffer_dirty(path->nodes[0]);
4103 btrfs_free_path(path);
4105 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4106 btrfs_end_transaction(trans);
4108 mnt_drop_write_file(file);
4112 void btrfs_get_block_group_info(struct list_head *groups_list,
4113 struct btrfs_ioctl_space_info *space)
4115 struct btrfs_block_group_cache *block_group;
4117 space->total_bytes = 0;
4118 space->used_bytes = 0;
4120 list_for_each_entry(block_group, groups_list, list) {
4121 space->flags = block_group->flags;
4122 space->total_bytes += block_group->key.offset;
4123 space->used_bytes +=
4124 btrfs_block_group_used(&block_group->item);
4128 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
4131 struct btrfs_ioctl_space_args space_args;
4132 struct btrfs_ioctl_space_info space;
4133 struct btrfs_ioctl_space_info *dest;
4134 struct btrfs_ioctl_space_info *dest_orig;
4135 struct btrfs_ioctl_space_info __user *user_dest;
4136 struct btrfs_space_info *info;
4137 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
4138 BTRFS_BLOCK_GROUP_SYSTEM,
4139 BTRFS_BLOCK_GROUP_METADATA,
4140 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
4147 if (copy_from_user(&space_args,
4148 (struct btrfs_ioctl_space_args __user *)arg,
4149 sizeof(space_args)))
4152 for (i = 0; i < num_types; i++) {
4153 struct btrfs_space_info *tmp;
4157 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4159 if (tmp->flags == types[i]) {
4169 down_read(&info->groups_sem);
4170 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4171 if (!list_empty(&info->block_groups[c]))
4174 up_read(&info->groups_sem);
4178 * Global block reserve, exported as a space_info
4182 /* space_slots == 0 means they are asking for a count */
4183 if (space_args.space_slots == 0) {
4184 space_args.total_spaces = slot_count;
4188 slot_count = min_t(u64, space_args.space_slots, slot_count);
4190 alloc_size = sizeof(*dest) * slot_count;
4192 /* we generally have at most 6 or so space infos, one for each raid
4193 * level. So, a whole page should be more than enough for everyone
4195 if (alloc_size > PAGE_SIZE)
4198 space_args.total_spaces = 0;
4199 dest = kmalloc(alloc_size, GFP_KERNEL);
4204 /* now we have a buffer to copy into */
4205 for (i = 0; i < num_types; i++) {
4206 struct btrfs_space_info *tmp;
4213 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4215 if (tmp->flags == types[i]) {
4224 down_read(&info->groups_sem);
4225 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4226 if (!list_empty(&info->block_groups[c])) {
4227 btrfs_get_block_group_info(
4228 &info->block_groups[c], &space);
4229 memcpy(dest, &space, sizeof(space));
4231 space_args.total_spaces++;
4237 up_read(&info->groups_sem);
4241 * Add global block reserve
4244 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4246 spin_lock(&block_rsv->lock);
4247 space.total_bytes = block_rsv->size;
4248 space.used_bytes = block_rsv->size - block_rsv->reserved;
4249 spin_unlock(&block_rsv->lock);
4250 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4251 memcpy(dest, &space, sizeof(space));
4252 space_args.total_spaces++;
4255 user_dest = (struct btrfs_ioctl_space_info __user *)
4256 (arg + sizeof(struct btrfs_ioctl_space_args));
4258 if (copy_to_user(user_dest, dest_orig, alloc_size))
4263 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
4270 * there are many ways the trans_start and trans_end ioctls can lead
4271 * to deadlocks. They should only be used by applications that
4272 * basically own the machine, and have a very in-depth understanding
4273 * of all the possible deadlocks and enospc problems.
4275 long btrfs_ioctl_trans_end(struct file *file)
4277 struct inode *inode = file_inode(file);
4278 struct btrfs_root *root = BTRFS_I(inode)->root;
4279 struct btrfs_file_private *private = file->private_data;
4281 if (!private || !private->trans)
4284 btrfs_end_transaction(private->trans);
4285 private->trans = NULL;
4287 atomic_dec(&root->fs_info->open_ioctl_trans);
4289 mnt_drop_write_file(file);
4293 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4296 struct btrfs_trans_handle *trans;
4300 trans = btrfs_attach_transaction_barrier(root);
4301 if (IS_ERR(trans)) {
4302 if (PTR_ERR(trans) != -ENOENT)
4303 return PTR_ERR(trans);
4305 /* No running transaction, don't bother */
4306 transid = root->fs_info->last_trans_committed;
4309 transid = trans->transid;
4310 ret = btrfs_commit_transaction_async(trans, 0);
4312 btrfs_end_transaction(trans);
4317 if (copy_to_user(argp, &transid, sizeof(transid)))
4322 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4328 if (copy_from_user(&transid, argp, sizeof(transid)))
4331 transid = 0; /* current trans */
4333 return btrfs_wait_for_commit(fs_info, transid);
4336 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4338 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4339 struct btrfs_ioctl_scrub_args *sa;
4342 if (!capable(CAP_SYS_ADMIN))
4345 sa = memdup_user(arg, sizeof(*sa));
4349 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4350 ret = mnt_want_write_file(file);
4355 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4356 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4359 if (copy_to_user(arg, sa, sizeof(*sa)))
4362 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4363 mnt_drop_write_file(file);
4369 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4371 if (!capable(CAP_SYS_ADMIN))
4374 return btrfs_scrub_cancel(fs_info);
4377 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4380 struct btrfs_ioctl_scrub_args *sa;
4383 if (!capable(CAP_SYS_ADMIN))
4386 sa = memdup_user(arg, sizeof(*sa));
4390 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4392 if (copy_to_user(arg, sa, sizeof(*sa)))
4399 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4402 struct btrfs_ioctl_get_dev_stats *sa;
4405 sa = memdup_user(arg, sizeof(*sa));
4409 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4414 ret = btrfs_get_dev_stats(fs_info, sa);
4416 if (copy_to_user(arg, sa, sizeof(*sa)))
4423 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4426 struct btrfs_ioctl_dev_replace_args *p;
4429 if (!capable(CAP_SYS_ADMIN))
4432 p = memdup_user(arg, sizeof(*p));
4437 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4438 if (sb_rdonly(fs_info->sb)) {
4442 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4443 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4445 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4446 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4449 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4450 btrfs_dev_replace_status(fs_info, p);
4453 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4454 ret = btrfs_dev_replace_cancel(fs_info, p);
4461 if (copy_to_user(arg, p, sizeof(*p)))
4468 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4474 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4475 struct inode_fs_paths *ipath = NULL;
4476 struct btrfs_path *path;
4478 if (!capable(CAP_DAC_READ_SEARCH))
4481 path = btrfs_alloc_path();
4487 ipa = memdup_user(arg, sizeof(*ipa));
4494 size = min_t(u32, ipa->size, 4096);
4495 ipath = init_ipath(size, root, path);
4496 if (IS_ERR(ipath)) {
4497 ret = PTR_ERR(ipath);
4502 ret = paths_from_inode(ipa->inum, ipath);
4506 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4507 rel_ptr = ipath->fspath->val[i] -
4508 (u64)(unsigned long)ipath->fspath->val;
4509 ipath->fspath->val[i] = rel_ptr;
4512 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
4513 (void *)(unsigned long)ipath->fspath, size);
4520 btrfs_free_path(path);
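/*
 * Callback for the LOGICAL_INO ioctl: append an (inum, offset, root)
 * triple to the data container, or record how much space was missing.
 */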
4527 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4529 struct btrfs_data_container *inodes = ctx;
4530 const size_t c = 3 * sizeof(u64);
4532 if (inodes->bytes_left >= c) {
4533 inodes->bytes_left -= c;
4534 inodes->val[inodes->elem_cnt] = inum;
4535 inodes->val[inodes->elem_cnt + 1] = offset;
4536 inodes->val[inodes->elem_cnt + 2] = root;
4537 inodes->elem_cnt += 3;
4539 inodes->bytes_missing += c - inodes->bytes_left;
4540 inodes->bytes_left = 0;
4541 inodes->elem_missed += 3;
4547 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4552 struct btrfs_ioctl_logical_ino_args *loi;
4553 struct btrfs_data_container *inodes = NULL;
4554 struct btrfs_path *path = NULL;
4556 if (!capable(CAP_SYS_ADMIN))
4559 loi = memdup_user(arg, sizeof(*loi));
4561 return PTR_ERR(loi);
4563 path = btrfs_alloc_path();
4569 size = min_t(u32, loi->size, SZ_64K);
4570 inodes = init_data_container(size);
4571 if (IS_ERR(inodes)) {
4572 ret = PTR_ERR(inodes);
4577 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4578 build_ino_list, inodes);
4584 ret = copy_to_user((void *)(unsigned long)loi->inodes,
4585 (void *)(unsigned long)inodes, size);
4590 btrfs_free_path(path);
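/*
 * Fill the userspace balance args from the current balance control:
 * state flags, the data/metadata/system args and progress statistics.
 */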
4597 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
4598 struct btrfs_ioctl_balance_args *bargs)
4600 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4602 bargs->flags = bctl->flags;
4604 if (atomic_read(&fs_info->balance_running))
4605 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4606 if (atomic_read(&fs_info->balance_pause_req))
4607 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4608 if (atomic_read(&fs_info->balance_cancel_req))
4609 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4611 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4612 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4613 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4616 spin_lock(&fs_info->balance_lock);
4617 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4618 spin_unlock(&fs_info->balance_lock);
4620 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4624 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4626 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4627 struct btrfs_fs_info *fs_info = root->fs_info;
4628 struct btrfs_ioctl_balance_args *bargs;
4629 struct btrfs_balance_control *bctl;
4630 bool need_unlock; /* for mut. excl. ops lock */
4633 if (!capable(CAP_SYS_ADMIN))
4636 ret = mnt_want_write_file(file);
4641 if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4642 mutex_lock(&fs_info->volume_mutex);
4643 mutex_lock(&fs_info->balance_mutex);
4649 * mut. excl. ops lock is locked. Three possibilities:
4650 * (1) some other op is running
4651 * (2) balance is running
4652 * (3) balance is paused -- special case (think resume)
4654 mutex_lock(&fs_info->balance_mutex);
4655 if (fs_info->balance_ctl) {
4656 /* this is either (2) or (3) */
4657 if (!atomic_read(&fs_info->balance_running)) {
4658 mutex_unlock(&fs_info->balance_mutex);
4659 if (!mutex_trylock(&fs_info->volume_mutex))
4661 mutex_lock(&fs_info->balance_mutex);
4663 if (fs_info->balance_ctl &&
4664 !atomic_read(&fs_info->balance_running)) {
4666 need_unlock = false;
4670 mutex_unlock(&fs_info->balance_mutex);
4671 mutex_unlock(&fs_info->volume_mutex);
4675 mutex_unlock(&fs_info->balance_mutex);
4681 mutex_unlock(&fs_info->balance_mutex);
4682 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4687 BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
4690 bargs = memdup_user(arg, sizeof(*bargs));
4691 if (IS_ERR(bargs)) {
4692 ret = PTR_ERR(bargs);
4696 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4697 if (!fs_info->balance_ctl) {
4702 bctl = fs_info->balance_ctl;
4703 spin_lock(&fs_info->balance_lock);
4704 bctl->flags |= BTRFS_BALANCE_RESUME;
4705 spin_unlock(&fs_info->balance_lock);
4713 if (fs_info->balance_ctl) {
4718 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4724 bctl->fs_info = fs_info;
4726 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4727 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4728 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4730 bctl->flags = bargs->flags;
4732 /* balance everything - no filters */
4733 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4736 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4743 * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP
4744 * goes to btrfs_balance. bctl is freed in __cancel_balance,
4745 * or, if restriper was paused all the way until unmount, in
4746 * free_fs_info. The flag is cleared in __cancel_balance.
4748 need_unlock = false;
4750 ret = btrfs_balance(bctl, bargs);
4754 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4763 mutex_unlock(&fs_info->balance_mutex);
4764 mutex_unlock(&fs_info->volume_mutex);
4766 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4768 mnt_drop_write_file(file);
4772 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4774 if (!capable(CAP_SYS_ADMIN))
4778 case BTRFS_BALANCE_CTL_PAUSE:
4779 return btrfs_pause_balance(fs_info);
4780 case BTRFS_BALANCE_CTL_CANCEL:
4781 return btrfs_cancel_balance(fs_info);
4787 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4790 struct btrfs_ioctl_balance_args *bargs;
4793 if (!capable(CAP_SYS_ADMIN))
4796 mutex_lock(&fs_info->balance_mutex);
4797 if (!fs_info->balance_ctl) {
4802 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4808 update_ioctl_balance_args(fs_info, 1, bargs);
4810 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4815 mutex_unlock(&fs_info->balance_mutex);
4819 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4821 struct inode *inode = file_inode(file);
4822 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4823 struct btrfs_ioctl_quota_ctl_args *sa;
4824 struct btrfs_trans_handle *trans = NULL;
4828 if (!capable(CAP_SYS_ADMIN))
4831 ret = mnt_want_write_file(file);
4835 sa = memdup_user(arg, sizeof(*sa));
4841 down_write(&fs_info->subvol_sem);
4842 trans = btrfs_start_transaction(fs_info->tree_root, 2);
4843 if (IS_ERR(trans)) {
4844 ret = PTR_ERR(trans);
4849 case BTRFS_QUOTA_CTL_ENABLE:
4850 ret = btrfs_quota_enable(trans, fs_info);
4852 case BTRFS_QUOTA_CTL_DISABLE:
4853 ret = btrfs_quota_disable(trans, fs_info);
4860 err = btrfs_commit_transaction(trans);
4865 up_write(&fs_info->subvol_sem);
4867 mnt_drop_write_file(file);
4871 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4873 struct inode *inode = file_inode(file);
4874 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4875 struct btrfs_root *root = BTRFS_I(inode)->root;
4876 struct btrfs_ioctl_qgroup_assign_args *sa;
4877 struct btrfs_trans_handle *trans;
4881 if (!capable(CAP_SYS_ADMIN))
4884 ret = mnt_want_write_file(file);
4888 sa = memdup_user(arg, sizeof(*sa));
4894 trans = btrfs_join_transaction(root);
4895 if (IS_ERR(trans)) {
4896 ret = PTR_ERR(trans);
4901 ret = btrfs_add_qgroup_relation(trans, fs_info,
4904 ret = btrfs_del_qgroup_relation(trans, fs_info,
4908 /* update qgroup status and info */
4909 err = btrfs_run_qgroups(trans, fs_info);
4911 btrfs_handle_fs_error(fs_info, err,
4912 "failed to update qgroup status and info");
4913 err = btrfs_end_transaction(trans);
4920 mnt_drop_write_file(file);
4924 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
4926 struct inode *inode = file_inode(file);
4927 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4928 struct btrfs_root *root = BTRFS_I(inode)->root;
4929 struct btrfs_ioctl_qgroup_create_args *sa;
4930 struct btrfs_trans_handle *trans;
4934 if (!capable(CAP_SYS_ADMIN))
4937 ret = mnt_want_write_file(file);
4941 sa = memdup_user(arg, sizeof(*sa));
4947 if (!sa->qgroupid) {
4952 trans = btrfs_join_transaction(root);
4953 if (IS_ERR(trans)) {
4954 ret = PTR_ERR(trans);
4959 ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
4961 ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
4964 err = btrfs_end_transaction(trans);
4971 mnt_drop_write_file(file);
4975 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
4977 struct inode *inode = file_inode(file);
4978 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4979 struct btrfs_root *root = BTRFS_I(inode)->root;
4980 struct btrfs_ioctl_qgroup_limit_args *sa;
4981 struct btrfs_trans_handle *trans;
4986 if (!capable(CAP_SYS_ADMIN))
4989 ret = mnt_want_write_file(file);
4993 sa = memdup_user(arg, sizeof(*sa));
4999 trans = btrfs_join_transaction(root);
5000 if (IS_ERR(trans)) {
5001 ret = PTR_ERR(trans);
5005 qgroupid = sa->qgroupid;
5007 /* take the current subvol as qgroup */
5008 qgroupid = root->root_key.objectid;
5011 ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
5013 err = btrfs_end_transaction(trans);
5020 mnt_drop_write_file(file);
5024 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5026 struct inode *inode = file_inode(file);
5027 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5028 struct btrfs_ioctl_quota_rescan_args *qsa;
5031 if (!capable(CAP_SYS_ADMIN))
5034 ret = mnt_want_write_file(file);
5038 qsa = memdup_user(arg, sizeof(*qsa));
5049 ret = btrfs_qgroup_rescan(fs_info);
5054 mnt_drop_write_file(file);
5058 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5060 struct inode *inode = file_inode(file);
5061 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5062 struct btrfs_ioctl_quota_rescan_args *qsa;
5065 if (!capable(CAP_SYS_ADMIN))
5068 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
5072 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5074 qsa->progress = fs_info->qgroup_rescan_progress.objectid;
5077 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5084 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5086 struct inode *inode = file_inode(file);
5087 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5089 if (!capable(CAP_SYS_ADMIN))
5092 return btrfs_qgroup_wait_for_completion(fs_info, true);
5095 static long _btrfs_ioctl_set_received_subvol(struct file *file,
5096 struct btrfs_ioctl_received_subvol_args *sa)
5098 struct inode *inode = file_inode(file);
5099 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5100 struct btrfs_root *root = BTRFS_I(inode)->root;
5101 struct btrfs_root_item *root_item = &root->root_item;
5102 struct btrfs_trans_handle *trans;
5103 struct timespec ct = current_time(inode);
5105 int received_uuid_changed;
5107 if (!inode_owner_or_capable(inode))
5110 ret = mnt_want_write_file(file);
5114 down_write(&fs_info->subvol_sem);
5116 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
5121 if (btrfs_root_readonly(root)) {
5128 * 2 - uuid items (received uuid + subvol uuid)
5130 trans = btrfs_start_transaction(root, 3);
5131 if (IS_ERR(trans)) {
5132 ret = PTR_ERR(trans);
5137 sa->rtransid = trans->transid;
5138 sa->rtime.sec = ct.tv_sec;
5139 sa->rtime.nsec = ct.tv_nsec;
5141 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5143 if (received_uuid_changed &&
5144 !btrfs_is_empty_uuid(root_item->received_uuid))
5145 btrfs_uuid_tree_rem(trans, fs_info, root_item->received_uuid,
5146 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5147 root->root_key.objectid);
5148 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5149 btrfs_set_root_stransid(root_item, sa->stransid);
5150 btrfs_set_root_rtransid(root_item, sa->rtransid);
5151 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5152 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5153 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5154 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5156 ret = btrfs_update_root(trans, fs_info->tree_root,
5157 &root->root_key, &root->root_item);
5159 btrfs_end_transaction(trans);
5162 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5163 ret = btrfs_uuid_tree_add(trans, fs_info, sa->uuid,
5164 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5165 root->root_key.objectid);
5166 if (ret < 0 && ret != -EEXIST) {
5167 btrfs_abort_transaction(trans, ret);
5171 ret = btrfs_commit_transaction(trans);
5173 btrfs_abort_transaction(trans, ret);
5178 up_write(&fs_info->subvol_sem);
5179 mnt_drop_write_file(file);
5184 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
5187 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
5188 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
5191 args32 = memdup_user(arg, sizeof(*args32));
5193 return PTR_ERR(args32);
5195 args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
5201 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
5202 args64->stransid = args32->stransid;
5203 args64->rtransid = args32->rtransid;
5204 args64->stime.sec = args32->stime.sec;
5205 args64->stime.nsec = args32->stime.nsec;
5206 args64->rtime.sec = args32->rtime.sec;
5207 args64->rtime.nsec = args32->rtime.nsec;
5208 args64->flags = args32->flags;
5210 ret = _btrfs_ioctl_set_received_subvol(file, args64);
5214 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
5215 args32->stransid = args64->stransid;
5216 args32->rtransid = args64->rtransid;
5217 args32->stime.sec = args64->stime.sec;
5218 args32->stime.nsec = args64->stime.nsec;
5219 args32->rtime.sec = args64->rtime.sec;
5220 args32->rtime.nsec = args64->rtime.nsec;
5221 args32->flags = args64->flags;
5223 ret = copy_to_user(arg, args32, sizeof(*args32));
5234 static long btrfs_ioctl_set_received_subvol(struct file *file,
5237 struct btrfs_ioctl_received_subvol_args *sa = NULL;
5240 sa = memdup_user(arg, sizeof(*sa));
5244 ret = _btrfs_ioctl_set_received_subvol(file, sa);
5249 ret = copy_to_user(arg, sa, sizeof(*sa));
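/*
 * Copy the filesystem label to userspace; if the label fills the whole
 * buffer, warn and return only the first BTRFS_LABEL_SIZE bytes.
 */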
5258 static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
5260 struct inode *inode = file_inode(file);
5261 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5264 char label[BTRFS_LABEL_SIZE];
5266 spin_lock(&fs_info->super_lock);
5267 memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
5268 spin_unlock(&fs_info->super_lock);
5270 len = strnlen(label, BTRFS_LABEL_SIZE);
5272 if (len == BTRFS_LABEL_SIZE) {
5274 "label is too long, return the first %zu bytes",
5278 ret = copy_to_user(arg, label, len);
5280 return ret ? -EFAULT : 0;
5283 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
5285 struct inode *inode = file_inode(file);
5286 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5287 struct btrfs_root *root = BTRFS_I(inode)->root;
5288 struct btrfs_super_block *super_block = fs_info->super_copy;
5289 struct btrfs_trans_handle *trans;
5290 char label[BTRFS_LABEL_SIZE];
5293 if (!capable(CAP_SYS_ADMIN))
5296 if (copy_from_user(label, arg, sizeof(label)))
5299 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5301 "unable to set label with more than %d bytes",
5302 BTRFS_LABEL_SIZE - 1);
5306 ret = mnt_want_write_file(file);
5310 trans = btrfs_start_transaction(root, 0);
5311 if (IS_ERR(trans)) {
5312 ret = PTR_ERR(trans);
5316 spin_lock(&fs_info->super_lock);
5317 strcpy(super_block->label, label);
5318 spin_unlock(&fs_info->super_lock);
5319 ret = btrfs_commit_transaction(trans);
5322 mnt_drop_write_file(file);
5326 #define INIT_FEATURE_FLAGS(suffix) \
5327 { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
5328 .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
5329 .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
5331 int btrfs_ioctl_get_supported_features(void __user *arg)
5333 static const struct btrfs_ioctl_feature_flags features[3] = {
5334 INIT_FEATURE_FLAGS(SUPP),
5335 INIT_FEATURE_FLAGS(SAFE_SET),
5336 INIT_FEATURE_FLAGS(SAFE_CLEAR)
5339 if (copy_to_user(arg, &features, sizeof(features)))
5345 static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
5347 struct inode *inode = file_inode(file);
5348 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5349 struct btrfs_super_block *super_block = fs_info->super_copy;
5350 struct btrfs_ioctl_feature_flags features;
5352 features.compat_flags = btrfs_super_compat_flags(super_block);
5353 features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
5354 features.incompat_flags = btrfs_super_incompat_flags(super_block);
5356 if (copy_to_user(arg, &features, sizeof(features)))
5362 static int check_feature_bits(struct btrfs_fs_info *fs_info,
5363 enum btrfs_feature_set set,
5364 u64 change_mask, u64 flags, u64 supported_flags,
5365 u64 safe_set, u64 safe_clear)
5367 const char *type = btrfs_feature_set_names[set];
5369 u64 disallowed, unsupported;
5370 u64 set_mask = flags & change_mask;
5371 u64 clear_mask = ~flags & change_mask;
5373 unsupported = set_mask & ~supported_flags;
5375 names = btrfs_printable_features(set, unsupported);
5378 "this kernel does not support the %s feature bit%s",
5379 names, strchr(names, ',') ? "s" : "");
5383 "this kernel does not support %s bits 0x%llx",
5388 disallowed = set_mask & ~safe_set;
5390 names = btrfs_printable_features(set, disallowed);
5393 "can't set the %s feature bit%s while mounted",
5394 names, strchr(names, ',') ? "s" : "");
5398 "can't set %s bits 0x%llx while mounted",
5403 disallowed = clear_mask & ~safe_clear;
5405 names = btrfs_printable_features(set, disallowed);
5408 "can't clear the %s feature bit%s while mounted",
5409 names, strchr(names, ',') ? "s" : "");
5413 "can't clear %s bits 0x%llx while mounted",
5421 #define check_feature(fs_info, change_mask, flags, mask_base) \
5422 check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
5423 BTRFS_FEATURE_ ## mask_base ## _SUPP, \
5424 BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
5425 BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
5427 static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
5428 {
5429 struct inode *inode = file_inode(file);
5430 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5431 struct btrfs_root *root = BTRFS_I(inode)->root;
5432 struct btrfs_super_block *super_block = fs_info->super_copy;
5433 struct btrfs_ioctl_feature_flags flags[2];
5434 struct btrfs_trans_handle *trans;
5435 u64 newflags;
5436 int ret;
5438 if (!capable(CAP_SYS_ADMIN))
5439 return -EPERM;
5441 if (copy_from_user(flags, arg, sizeof(flags)))
5442 return -EFAULT;
5444 /* Nothing to do */
5445 if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
5446 !flags[0].incompat_flags)
5447 return 0;
5449 ret = check_feature(fs_info, flags[0].compat_flags,
5450 flags[1].compat_flags, COMPAT);
5451 if (ret)
5452 return ret;
5454 ret = check_feature(fs_info, flags[0].compat_ro_flags,
5455 flags[1].compat_ro_flags, COMPAT_RO);
5456 if (ret)
5457 return ret;
5459 ret = check_feature(fs_info, flags[0].incompat_flags,
5460 flags[1].incompat_flags, INCOMPAT);
5461 if (ret)
5462 return ret;
5464 ret = mnt_want_write_file(file);
5465 if (ret)
5466 return ret;
5468 trans = btrfs_start_transaction(root, 0);
5469 if (IS_ERR(trans)) {
5470 ret = PTR_ERR(trans);
5471 goto out_drop_write;
5472 }
5474 spin_lock(&fs_info->super_lock);
5475 newflags = btrfs_super_compat_flags(super_block);
5476 newflags |= flags[0].compat_flags & flags[1].compat_flags;
5477 newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
5478 btrfs_set_super_compat_flags(super_block, newflags);
5480 newflags = btrfs_super_compat_ro_flags(super_block);
5481 newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
5482 newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
5483 btrfs_set_super_compat_ro_flags(super_block, newflags);
5485 newflags = btrfs_super_incompat_flags(super_block);
5486 newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
5487 newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
5488 btrfs_set_super_incompat_flags(super_block, newflags);
5489 spin_unlock(&fs_info->super_lock);
5491 ret = btrfs_commit_transaction(trans);
5492 out_drop_write:
5493 mnt_drop_write_file(file);
5495 return ret;
5496 }
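/*
 * Top-level entry point for btrfs ioctls: decode the command number and
 * dispatch to the helper that implements it.  Anything not handled in the
 * switch below falls through to the -ENOTTY return at the end.
 */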
5498 long btrfs_ioctl(struct file *file, unsigned int
5499 cmd, unsigned long arg)
5500 {
5501 struct inode *inode = file_inode(file);
5502 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5503 struct btrfs_root *root = BTRFS_I(inode)->root;
5504 void __user *argp = (void __user *)arg;
5506 switch (cmd) {
5507 case FS_IOC_GETFLAGS:
5508 return btrfs_ioctl_getflags(file, argp);
5509 case FS_IOC_SETFLAGS:
5510 return btrfs_ioctl_setflags(file, argp);
5511 case FS_IOC_GETVERSION:
5512 return btrfs_ioctl_getversion(file, argp);
5513 case FITRIM:
5514 return btrfs_ioctl_fitrim(file, argp);
5515 case BTRFS_IOC_SNAP_CREATE:
5516 return btrfs_ioctl_snap_create(file, argp, 0);
5517 case BTRFS_IOC_SNAP_CREATE_V2:
5518 return btrfs_ioctl_snap_create_v2(file, argp, 0);
5519 case BTRFS_IOC_SUBVOL_CREATE:
5520 return btrfs_ioctl_snap_create(file, argp, 1);
5521 case BTRFS_IOC_SUBVOL_CREATE_V2:
5522 return btrfs_ioctl_snap_create_v2(file, argp, 1);
5523 case BTRFS_IOC_SNAP_DESTROY:
5524 return btrfs_ioctl_snap_destroy(file, argp);
5525 case BTRFS_IOC_SUBVOL_GETFLAGS:
5526 return btrfs_ioctl_subvol_getflags(file, argp);
5527 case BTRFS_IOC_SUBVOL_SETFLAGS:
5528 return btrfs_ioctl_subvol_setflags(file, argp);
5529 case BTRFS_IOC_DEFAULT_SUBVOL:
5530 return btrfs_ioctl_default_subvol(file, argp);
5531 case BTRFS_IOC_DEFRAG:
5532 return btrfs_ioctl_defrag(file, NULL);
5533 case BTRFS_IOC_DEFRAG_RANGE:
5534 return btrfs_ioctl_defrag(file, argp);
5535 case BTRFS_IOC_RESIZE:
5536 return btrfs_ioctl_resize(file, argp);
5537 case BTRFS_IOC_ADD_DEV:
5538 return btrfs_ioctl_add_dev(fs_info, argp);
5539 case BTRFS_IOC_RM_DEV:
5540 return btrfs_ioctl_rm_dev(file, argp);
5541 case BTRFS_IOC_RM_DEV_V2:
5542 return btrfs_ioctl_rm_dev_v2(file, argp);
5543 case BTRFS_IOC_FS_INFO:
5544 return btrfs_ioctl_fs_info(fs_info, argp);
5545 case BTRFS_IOC_DEV_INFO:
5546 return btrfs_ioctl_dev_info(fs_info, argp);
5547 case BTRFS_IOC_BALANCE:
5548 return btrfs_ioctl_balance(file, NULL);
5549 case BTRFS_IOC_TRANS_START:
5550 return btrfs_ioctl_trans_start(file);
5551 case BTRFS_IOC_TRANS_END:
5552 return btrfs_ioctl_trans_end(file);
5553 case BTRFS_IOC_TREE_SEARCH:
5554 return btrfs_ioctl_tree_search(file, argp);
5555 case BTRFS_IOC_TREE_SEARCH_V2:
5556 return btrfs_ioctl_tree_search_v2(file, argp);
5557 case BTRFS_IOC_INO_LOOKUP:
5558 return btrfs_ioctl_ino_lookup(file, argp);
5559 case BTRFS_IOC_INO_PATHS:
5560 return btrfs_ioctl_ino_to_path(root, argp);
5561 case BTRFS_IOC_LOGICAL_INO:
5562 return btrfs_ioctl_logical_to_ino(fs_info, argp);
5563 case BTRFS_IOC_SPACE_INFO:
5564 return btrfs_ioctl_space_info(fs_info, argp);
5565 case BTRFS_IOC_SYNC: {
5566 int ret;
5568 ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
5569 if (ret)
5570 return ret;
5571 ret = btrfs_sync_fs(inode->i_sb, 1);
5572 /*
5573 * The transaction thread may want to do more work,
5574 * namely it pokes the cleaner kthread that will start
5575 * processing uncleaned subvols.
5576 */
5577 wake_up_process(fs_info->transaction_kthread);
5578 return ret;
5579 }
5580 case BTRFS_IOC_START_SYNC:
5581 return btrfs_ioctl_start_sync(root, argp);
5582 case BTRFS_IOC_WAIT_SYNC:
5583 return btrfs_ioctl_wait_sync(fs_info, argp);
5584 case BTRFS_IOC_SCRUB:
5585 return btrfs_ioctl_scrub(file, argp);
5586 case BTRFS_IOC_SCRUB_CANCEL:
5587 return btrfs_ioctl_scrub_cancel(fs_info);
5588 case BTRFS_IOC_SCRUB_PROGRESS:
5589 return btrfs_ioctl_scrub_progress(fs_info, argp);
5590 case BTRFS_IOC_BALANCE_V2:
5591 return btrfs_ioctl_balance(file, argp);
5592 case BTRFS_IOC_BALANCE_CTL:
5593 return btrfs_ioctl_balance_ctl(fs_info, arg);
5594 case BTRFS_IOC_BALANCE_PROGRESS:
5595 return btrfs_ioctl_balance_progress(fs_info, argp);
5596 case BTRFS_IOC_SET_RECEIVED_SUBVOL:
5597 return btrfs_ioctl_set_received_subvol(file, argp);
5598 #ifdef CONFIG_64BIT
5599 case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
5600 return btrfs_ioctl_set_received_subvol_32(file, argp);
5601 #endif
5602 case BTRFS_IOC_SEND:
5603 return btrfs_ioctl_send(file, argp);
5604 case BTRFS_IOC_GET_DEV_STATS:
5605 return btrfs_ioctl_get_dev_stats(fs_info, argp);
5606 case BTRFS_IOC_QUOTA_CTL:
5607 return btrfs_ioctl_quota_ctl(file, argp);
5608 case BTRFS_IOC_QGROUP_ASSIGN:
5609 return btrfs_ioctl_qgroup_assign(file, argp);
5610 case BTRFS_IOC_QGROUP_CREATE:
5611 return btrfs_ioctl_qgroup_create(file, argp);
5612 case BTRFS_IOC_QGROUP_LIMIT:
5613 return btrfs_ioctl_qgroup_limit(file, argp);
5614 case BTRFS_IOC_QUOTA_RESCAN:
5615 return btrfs_ioctl_quota_rescan(file, argp);
5616 case BTRFS_IOC_QUOTA_RESCAN_STATUS:
5617 return btrfs_ioctl_quota_rescan_status(file, argp);
5618 case BTRFS_IOC_QUOTA_RESCAN_WAIT:
5619 return btrfs_ioctl_quota_rescan_wait(file, argp);
5620 case BTRFS_IOC_DEV_REPLACE:
5621 return btrfs_ioctl_dev_replace(fs_info, argp);
5622 case BTRFS_IOC_GET_FSLABEL:
5623 return btrfs_ioctl_get_fslabel(file, argp);
5624 case BTRFS_IOC_SET_FSLABEL:
5625 return btrfs_ioctl_set_fslabel(file, argp);
5626 case BTRFS_IOC_GET_SUPPORTED_FEATURES:
5627 return btrfs_ioctl_get_supported_features(argp);
5628 case BTRFS_IOC_GET_FEATURES:
5629 return btrfs_ioctl_get_features(file, argp);
5630 case BTRFS_IOC_SET_FEATURES:
5631 return btrfs_ioctl_set_features(file, argp);
5632 }
5634 return -ENOTTY;
5635 }
5637 #ifdef CONFIG_COMPAT
5638 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5639 {
5640 /*
5641 * These all access 32-bit values anyway so no further
5642 * handling is necessary.
5643 */
5644 switch (cmd) {
5645 case FS_IOC32_GETFLAGS:
5646 cmd = FS_IOC_GETFLAGS;
5647 break;
5648 case FS_IOC32_SETFLAGS:
5649 cmd = FS_IOC_SETFLAGS;
5650 break;
5651 case FS_IOC32_GETVERSION:
5652 cmd = FS_IOC_GETVERSION;
5653 break;
5654 }
5656 return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
5657 }
5658 #endif