// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ocfs2/ioctl.c
 *
 * Copyright (C) 2006 Herbert Poetzl
 * adapted from Remy Card's ext2/ioctl.c
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/fileattr.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "journal.h"

#include "ocfs2_fs.h"
#include "ioctl.h"
#include "resize.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "move_extents.h"

#define o2info_from_user(a, b)	\
		copy_from_user(&(a), (b), sizeof(a))
#define o2info_to_user(a, b)	\
		copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
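
/*
 * Both helpers above return the copy_{from,to}_user() result, i.e. the
 * number of bytes left uncopied; callers treat any nonzero value as -EFAULT.
 */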

/*
 * This is just a best-effort to tell userspace that this request
 * caused the error.
 */
static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
					struct ocfs2_info_request __user *req)
{
	kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
	(void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
}

static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags |= OCFS2_INFO_FL_FILLED;
}

static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
}
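
/*
 * A request is "coherent" unless userspace set OCFS2_INFO_FL_NON_COHERENT.
 * Coherent handlers take the cluster lock before reading metadata;
 * non-coherent ones read the blocks straight from disk without locking.
 */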
static inline int o2info_coherent(struct ocfs2_info_request *req)
{
	return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
}

int ocfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	unsigned int flags;
	int status;

	status = ocfs2_inode_lock(inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}
	ocfs2_get_inode_flags(OCFS2_I(inode));
	flags = OCFS2_I(inode)->ip_attr;
	ocfs2_inode_unlock(inode, 0);

	fileattr_fill_flags(fa, flags & OCFS2_FL_VISIBLE);

	return status;
}
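
/*
 * Update the inode attribute flags on behalf of the VFS fileattr interface
 * (e.g. FS_IOC_SETFLAGS); only the OCFS2_FL_MODIFIABLE bits coming from
 * userspace are applied, the rest are preserved from the on-disk inode.
 */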
int ocfs2_fileattr_set(struct mnt_idmap *idmap,
		       struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	unsigned int flags = fa->flags;
	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	struct buffer_head *bh = NULL;
	unsigned int oldflags;
	int status;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	if (!S_ISDIR(inode->i_mode))
		flags &= ~OCFS2_DIRSYNC_FL;

	oldflags = ocfs2_inode->ip_attr;
	flags = flags & OCFS2_FL_MODIFIABLE;
	flags |= oldflags & ~OCFS2_FL_MODIFIABLE;

	/* Check already done by VFS, but repeat with ocfs lock */
	status = -EPERM;
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto bail_unlock;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	ocfs2_inode->ip_attr = flags;
	ocfs2_set_inode_flags(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);

bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail:
	brelse(bh);

	return status;
}
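
/*
 * OCFS2_IOC_INFO request handlers.  Each one copies its request struct in
 * from userspace, fills in the payload, marks the request filled and copies
 * it back; a failed copy in either direction is reported as -EFAULT.
 */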
static int ocfs2_info_handle_blocksize(struct inode *inode,
				       struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_blocksize oib;

	if (o2info_from_user(oib, req))
		return -EFAULT;

	oib.ib_blocksize = inode->i_sb->s_blocksize;

	o2info_set_request_filled(&oib.ib_req);

	if (o2info_to_user(oib, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_clustersize(struct inode *inode,
					 struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_clustersize oic;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oic, req))
		return -EFAULT;

	oic.ic_clustersize = osb->s_clustersize;

	o2info_set_request_filled(&oic.ic_req);

	if (o2info_to_user(oic, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_maxslots(struct inode *inode,
				      struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_maxslots oim;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oim, req))
		return -EFAULT;

	oim.im_max_slots = osb->max_slots;

	o2info_set_request_filled(&oim.im_req);

	if (o2info_to_user(oim, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_label(struct inode *inode,
				   struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_label oil;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oil, req))
		return -EFAULT;

	memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);

	o2info_set_request_filled(&oil.il_req);

	if (o2info_to_user(oil, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_uuid(struct inode *inode,
				  struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_uuid oiu;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oiu, req))
		return -EFAULT;

	memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);

	o2info_set_request_filled(&oiu.iu_req);

	if (o2info_to_user(oiu, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_fs_features(struct inode *inode,
					 struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_fs_features oif;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oif, req))
		return -EFAULT;

	oif.if_compat_features = osb->s_feature_compat;
	oif.if_incompat_features = osb->s_feature_incompat;
	oif.if_ro_compat_features = osb->s_feature_ro_compat;

	o2info_set_request_filled(&oif.if_req);

	if (o2info_to_user(oif, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_journal_size(struct inode *inode,
					  struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_journal_size oij;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oij, req))
		return -EFAULT;

	oij.ij_journal_size = i_size_read(osb->journal->j_inode);

	o2info_set_request_filled(&oij.ij_req);

	if (o2info_to_user(oij, req))
		return -EFAULT;

	return 0;
}
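
/*
 * Read one slot's inode allocator dinode and record its total and free
 * inode counts.  Coherent requests take the inode cluster lock first;
 * non-coherent requests read the block directly from disk.
 */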
static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
				       struct inode *inode_alloc, u64 blkno,
				       struct ocfs2_info_freeinode *fi,
				       u32 slot)
{
	int status = 0, unlock = 0;

	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *dinode_alloc = NULL;

	if (inode_alloc)
		inode_lock(inode_alloc);

	if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
		status = ocfs2_inode_lock(inode_alloc, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	dinode_alloc = (struct ocfs2_dinode *)bh->b_data;

	fi->ifi_stat[slot].lfi_total =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
	fi->ifi_stat[slot].lfi_free =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);

bail:
	if (unlock)
		ocfs2_inode_unlock(inode_alloc, 0);

	if (inode_alloc)
		inode_unlock(inode_alloc);

	brelse(bh);

	return status;
}
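
/*
 * OCFS2_INFO_FREEINODE: walk the inode allocator of every slot and report
 * the per-slot total/free inode counts back to userspace.
 */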
static int ocfs2_info_handle_freeinode(struct inode *inode,
				       struct ocfs2_info_request __user *req)
{
	u32 i;
	u64 blkno = -1;
	char namebuf[40];
	int status, type = INODE_ALLOC_SYSTEM_INODE;
	struct ocfs2_info_freeinode *oifi = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *inode_alloc = NULL;

	oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
	if (!oifi) {
		status = -ENOMEM;
		goto out_err;
	}

	if (o2info_from_user(*oifi, req)) {
		status = -EFAULT;
		goto out_free;
	}

	oifi->ifi_slotnum = osb->max_slots;

	for (i = 0; i < oifi->ifi_slotnum; i++) {
		if (o2info_coherent(&oifi->ifi_req)) {
			inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
			if (!inode_alloc) {
				mlog(ML_ERROR, "unable to get alloc inode in "
				    "slot %u\n", i);
				status = -EIO;
				goto bail;
			}
		} else {
			ocfs2_sprintf_system_inode_name(namebuf,
							sizeof(namebuf),
							type, i);
			status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
							    namebuf,
							    strlen(namebuf),
							    &blkno);
			if (status < 0) {
				status = -ENOENT;
				goto bail;
			}
		}

		status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);

		iput(inode_alloc);
		inode_alloc = NULL;

		if (status < 0)
			goto bail;
	}

	o2info_set_request_filled(&oifi->ifi_req);

	if (o2info_to_user(*oifi, req)) {
		status = -EFAULT;
		goto bail;
	}

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oifi->ifi_req, req);

out_free:
	kfree(oifi);
out_err:
	return status;
}
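
/*
 * Free-space fragmentation accounting: free chunks are bucketed by
 * power-of-two size into fc_chunks[]/fc_clusters[], while the stats block
 * tracks the minimum, maximum and average chunk size seen.
 */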
static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
				   unsigned int chunksize)
{
	u32 index;

	index = __ilog2_u32(chunksize);
	if (index >= OCFS2_INFO_MAX_HIST)
		index = OCFS2_INFO_MAX_HIST - 1;

	hist->fc_chunks[index]++;
	hist->fc_clusters[index] += chunksize;
}

static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
			       unsigned int chunksize)
{
	if (chunksize > stats->ffs_max)
		stats->ffs_max = chunksize;

	if (chunksize < stats->ffs_min)
		stats->ffs_min = chunksize;

	stats->ffs_avg += chunksize;
	stats->ffs_free_chunks_real++;
}

static void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
				  unsigned int chunksize)
{
	o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
	o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
}
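
/*
 * Walk one chain of group descriptors (following bg_next_group) and scan
 * each group bitmap chunk by chunk, feeding every run of free clusters
 * into the freefrag histogram and statistics.
 */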
static int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
					  struct inode *gb_inode,
					  struct ocfs2_dinode *gb_dinode,
					  struct ocfs2_chain_rec *rec,
					  struct ocfs2_info_freefrag *ffg,
					  u32 chunks_in_group)
{
	int status = 0, used;
	u64 blkno;

	struct buffer_head *bh = NULL;
	struct ocfs2_group_desc *bg = NULL;

	unsigned int max_bits, num_clusters;
	unsigned int offset = 0, cluster, chunk;
	unsigned int chunk_free, last_chunksize = 0;

	if (!le32_to_cpu(rec->c_free))
		goto bail;

	do {
		if (!bg)
			blkno = le64_to_cpu(rec->c_blkno);
		else
			blkno = le64_to_cpu(bg->bg_next_group);

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		if (o2info_coherent(&ffg->iff_req))
			status = ocfs2_read_group_descriptor(gb_inode,
							     gb_dinode,
							     blkno, &bh);
		else
			status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);

		if (status < 0) {
			mlog(ML_ERROR, "Can't read the group descriptor # "
			     "%llu from device.", (unsigned long long)blkno);
			status = -EIO;
			goto bail;
		}

		bg = (struct ocfs2_group_desc *)bh->b_data;

		if (!le16_to_cpu(bg->bg_free_bits_count))
			continue;

		max_bits = le16_to_cpu(bg->bg_bits);
		offset = 0;

		for (chunk = 0; chunk < chunks_in_group; chunk++) {
			/*
			 * The last chunk may not be an entire one.
			 */
			if ((offset + ffg->iff_chunksize) > max_bits)
				num_clusters = max_bits - offset;
			else
				num_clusters = ffg->iff_chunksize;

			chunk_free = 0;
			for (cluster = 0; cluster < num_clusters; cluster++) {
				used = ocfs2_test_bit(offset,
						(unsigned long *)bg->bg_bitmap);
				/*
				 * - chunk_free counts free clusters in #N chunk.
				 * - last_chunksize records the size (in clusters)
				 *   of the last real free chunk being counted.
				 */
				if (!used) {
					last_chunksize++;
					chunk_free++;
				}

				if (used && last_chunksize) {
					ocfs2_info_update_ffg(ffg,
							      last_chunksize);
					last_chunksize = 0;
				}

				offset++;
			}

			if (chunk_free == ffg->iff_chunksize)
				ffg->iff_ffs.ffs_free_chunks++;
		}

		/*
		 * Need to update the info for the last free chunk.
		 */
		if (last_chunksize)
			ocfs2_info_update_ffg(ffg, last_chunksize);

	} while (le64_to_cpu(bg->bg_next_group));

bail:
	brelse(bh);

	return status;
}
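
/*
 * Scan the whole global bitmap: validate the requested chunk size against
 * the clusters-per-group count, scan every chain record, then finish by
 * computing the average free chunk size.
 */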
static int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
					   struct inode *gb_inode, u64 blkno,
					   struct ocfs2_info_freefrag *ffg)
{
	u32 chunks_in_group;
	int status = 0, unlock = 0, i;

	struct buffer_head *bh = NULL;
	struct ocfs2_chain_list *cl = NULL;
	struct ocfs2_chain_rec *rec = NULL;
	struct ocfs2_dinode *gb_dinode = NULL;

	if (gb_inode)
		inode_lock(gb_inode);

	if (o2info_coherent(&ffg->iff_req)) {
		status = ocfs2_inode_lock(gb_inode, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	gb_dinode = (struct ocfs2_dinode *)bh->b_data;
	cl = &(gb_dinode->id2.i_chain);

	/*
	 * The chunk size (in clusters) from userspace must not be larger
	 * than the number of clusters in a group.
	 */
	if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
		status = -EINVAL;
		goto bail;
	}

	memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));

	ffg->iff_ffs.ffs_min = ~0U;
	ffg->iff_ffs.ffs_clusters =
			le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
	ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
			le32_to_cpu(gb_dinode->id1.bitmap1.i_used);

	chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
		rec = &(cl->cl_recs[i]);
		status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
							gb_dinode, rec, ffg,
							chunks_in_group);
		if (status)
			goto bail;
	}

	if (ffg->iff_ffs.ffs_free_chunks_real)
		ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
					ffg->iff_ffs.ffs_free_chunks_real);
bail:
	if (unlock)
		ocfs2_inode_unlock(gb_inode, 0);

	if (gb_inode)
		inode_unlock(gb_inode);

	iput(gb_inode);
	brelse(bh);

	return status;
}
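
/*
 * OCFS2_INFO_FREEFRAG: locate the global bitmap inode (as a system file
 * for coherent requests, by name lookup otherwise) and report free-space
 * fragmentation for the chunk size supplied by userspace.
 */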
static int ocfs2_info_handle_freefrag(struct inode *inode,
				      struct ocfs2_info_request __user *req)
{
	u64 blkno = -1;
	char namebuf[40];
	int status, type = GLOBAL_BITMAP_SYSTEM_INODE;

	struct ocfs2_info_freefrag *oiff;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *gb_inode = NULL;

	oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
	if (!oiff) {
		status = -ENOMEM;
		goto out_err;
	}

	if (o2info_from_user(*oiff, req)) {
		status = -EFAULT;
		goto out_free;
	}
	/*
	 * The chunk size from userspace must be a power of 2.
	 */
	if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
	    (!oiff->iff_chunksize)) {
		status = -EINVAL;
		goto bail;
	}

	if (o2info_coherent(&oiff->iff_req)) {
		gb_inode = ocfs2_get_system_file_inode(osb, type,
						       OCFS2_INVALID_SLOT);
		if (!gb_inode) {
			mlog(ML_ERROR, "unable to get global_bitmap inode\n");
			status = -EIO;
			goto bail;
		}
	} else {
		ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
						OCFS2_INVALID_SLOT);
		status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
						    namebuf,
						    strlen(namebuf),
						    &blkno);
		if (status < 0) {
			status = -ENOENT;
			goto bail;
		}
	}

	status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
	if (status < 0)
		goto bail;

	o2info_set_request_filled(&oiff->iff_req);

	if (o2info_to_user(*oiff, req)) {
		status = -EFAULT;
		goto bail;
	}

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oiff->iff_req, req);

out_free:
	kfree(oiff);
out_err:
	return status;
}

static int ocfs2_info_handle_unknown(struct inode *inode,
				     struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		return -EFAULT;

	o2info_clear_request_filled(&oir);

	if (o2info_to_user(oir, req))
		return -EFAULT;

	return 0;
}

/*
 * Validate and distinguish OCFS2_IOC_INFO requests.
 *
 * - validate the magic number.
 * - distinguish different requests.
 * - validate size of different requests.
 */
static int ocfs2_info_handle_request(struct inode *inode,
				     struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		goto bail;

	status = -EINVAL;
	if (oir.ir_magic != OCFS2_INFO_MAGIC)
		goto bail;

	switch (oir.ir_code) {
	case OCFS2_INFO_BLOCKSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_blocksize))
			status = ocfs2_info_handle_blocksize(inode, req);
		break;
	case OCFS2_INFO_CLUSTERSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_clustersize))
			status = ocfs2_info_handle_clustersize(inode, req);
		break;
	case OCFS2_INFO_MAXSLOTS:
		if (oir.ir_size == sizeof(struct ocfs2_info_maxslots))
			status = ocfs2_info_handle_maxslots(inode, req);
		break;
	case OCFS2_INFO_LABEL:
		if (oir.ir_size == sizeof(struct ocfs2_info_label))
			status = ocfs2_info_handle_label(inode, req);
		break;
	case OCFS2_INFO_UUID:
		if (oir.ir_size == sizeof(struct ocfs2_info_uuid))
			status = ocfs2_info_handle_uuid(inode, req);
		break;
	case OCFS2_INFO_FS_FEATURES:
		if (oir.ir_size == sizeof(struct ocfs2_info_fs_features))
			status = ocfs2_info_handle_fs_features(inode, req);
		break;
	case OCFS2_INFO_JOURNAL_SIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
			status = ocfs2_info_handle_journal_size(inode, req);
		break;
	case OCFS2_INFO_FREEINODE:
		if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
			status = ocfs2_info_handle_freeinode(inode, req);
		break;
	case OCFS2_INFO_FREEFRAG:
		if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
			status = ocfs2_info_handle_freefrag(inode, req);
		break;
	default:
		status = ocfs2_info_handle_unknown(inode, req);
		break;
	}

bail:
	return status;
}
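
/*
 * Fetch the idx-th request address from the userspace pointer array,
 * honouring 32-bit (compat) versus native pointer width.
 */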
static int ocfs2_get_request_ptr(struct ocfs2_info *info, int idx,
				 u64 *req_addr, int compat_flag)
{
	int status = -EFAULT;
	u64 __user *bp = NULL;

	if (compat_flag) {
#ifdef CONFIG_COMPAT
		/*
		 * bp stores the base address of the pointer array, which
		 * collects the addresses of the separate requests.
		 */
		bp = (u64 __user *)(unsigned long)compat_ptr(info->oi_requests);
#else
		BUG();
#endif
	} else
		bp = (u64 __user *)(unsigned long)(info->oi_requests);

	if (o2info_from_user(*req_addr, bp + idx))
		goto bail;

	status = 0;
bail:
	return status;
}

/*
 * OCFS2_IOC_INFO handles an array of requests passed from userspace.
 *
 * ocfs2_info_handle() receives a large info aggregation, grabs and
 * validates the request count from the header, then breaks it into
 * small pieces so that the specific handlers can process them one by
 * one.
 *
 * The idea here is to make each separate request small enough to ensure
 * better backward & forward compatibility, since a small request is
 * less likely to break if the disk layout changes.
 */
static noinline_for_stack int
ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info, int compat_flag)
{
	int i, status = 0;
	u64 req_addr;
	struct ocfs2_info_request __user *reqp;

	if ((info->oi_count > OCFS2_INFO_MAX_REQUEST) ||
	    (!info->oi_requests)) {
		status = -EINVAL;
		goto bail;
	}

	for (i = 0; i < info->oi_count; i++) {

		status = ocfs2_get_request_ptr(info, i, &req_addr, compat_flag);
		if (status)
			break;

		reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
		if (!reqp) {
			status = -EINVAL;
			goto bail;
		}

		status = ocfs2_info_handle_request(inode, reqp);
		if (status)
			break;
	}

bail:
	return status;
}
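
/*
 * Sketch of how a userspace caller is expected to drive OCFS2_IOC_INFO,
 * based on the handling above.  Illustration only, not part of this file;
 * it assumes the request/field names exported to userspace (ocfs2_ioctl.h):
 *
 *	struct ocfs2_info_blocksize oib = {
 *		.ib_req = {
 *			.ir_magic = OCFS2_INFO_MAGIC,
 *			.ir_code  = OCFS2_INFO_BLOCKSIZE,
 *			.ir_size  = sizeof(oib),
 *		},
 *	};
 *	__u64 reqs[1] = { (__u64)(unsigned long)&oib };
 *	struct ocfs2_info info = {
 *		.oi_requests = (__u64)(unsigned long)reqs,
 *		.oi_count    = 1,
 *	};
 *
 *	if (ioctl(fd, OCFS2_IOC_INFO, &info) == 0 &&
 *	    (oib.ib_req.ir_flags & OCFS2_INFO_FL_FILLED))
 *		printf("block size: %u\n", oib.ib_blocksize);
 */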

long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	void __user *argp = (void __user *)arg;
	int status;

	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
	{
		struct ocfs2_space_resv sr;

		if (copy_from_user(&sr, (int __user *) arg, sizeof(sr)))
			return -EFAULT;

		return ocfs2_change_file_space(filp, cmd, &sr);
	}
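
	/*
	 * Online resize: GROUP_EXTEND and GROUP_ADD/ADD64 grow the volume,
	 * so both require CAP_SYS_RESOURCE and write access to the mount.
	 */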
	case OCFS2_IOC_GROUP_EXTEND:
	{
		int new_clusters;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (get_user(new_clusters, (int __user *)arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_extend(inode, new_clusters);
		mnt_drop_write_file(filp);
		return status;
	}
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
	{
		struct ocfs2_new_group_input input;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (copy_from_user(&input, (int __user *) arg, sizeof(input)))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_add(inode, &input);
		mnt_drop_write_file(filp);
		return status;
	}
	case OCFS2_IOC_REFLINK:
	{
		struct reflink_arguments args;
		const char __user *old_path;
		const char __user *new_path;
		bool preserve;

		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;

		old_path = (const char __user *)(unsigned long)args.old_path;
		new_path = (const char __user *)(unsigned long)args.new_path;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
	}
	case OCFS2_IOC_INFO:
	{
		struct ocfs2_info info;

		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 0);
	}
	case FITRIM:
	{
		struct super_block *sb = inode->i_sb;
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!bdev_max_discard_sectors(sb->s_bdev))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, argp, sizeof(range)))
			return -EFAULT;

		range.minlen = max_t(u64, bdev_discard_granularity(sb->s_bdev),
				     range.minlen);
		ret = ocfs2_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user(argp, &range, sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case OCFS2_IOC_MOVE_EXT:
		return ocfs2_ioctl_move_extents(filp, argp);
	default:
		return -ENOTTY;
	}
}
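
/*
 * 32-bit compat entry point.  Commands whose structures are layout
 * compatible fall through to ocfs2_ioctl(); OCFS2_IOC_REFLINK converts its
 * path pointers with compat_ptr(), and OCFS2_IOC_INFO is dispatched with
 * compat_flag set so the request array is read with 32-bit pointers.
 */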
#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	bool preserve;
	struct reflink_arguments args;
	struct inode *inode = file_inode(file);
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
	case OCFS2_IOC_GROUP_EXTEND:
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		break;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
					   compat_ptr(args.new_path), preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 1);
	case OCFS2_IOC_MOVE_EXT:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ocfs2_ioctl(file, cmd, arg);
}
#endif