1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
6 #include <linux/mount.h>
7 #include <linux/fsmap.h>
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_itable.h"
18 #include "xfs_fsops.h"
19 #include "xfs_rtalloc.h"
21 #include "xfs_ioctl.h"
22 #include "xfs_ioctl32.h"
23 #include "xfs_trace.h"
/*
 * Rebuild a compat ioctl command number so it encodes sizeof() the
 * native (64-bit) payload type instead of the 32-bit compat one, so
 * the request can be forwarded to the native ioctl handler.
 */
26 #define _NATIVE_IOC(cmd, type) \
27 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
29 #ifdef BROKEN_X86_ALIGNMENT
/*
 * Copy a 32-bit (ia-32 layout) xfs_flock64 in from userspace field by
 * field; the compat struct is packed differently from the native one,
 * so a bulk copy_from_user() of the whole struct is not possible.
 * NOTE(review): this listing is missing the return type, the "bf"
 * output-parameter line, and the return statements — presumably it
 * returns -EFAULT when any user access faults, 0 otherwise; confirm
 * against the full source.
 */
31 xfs_compat_flock64_copyin(
33 compat_xfs_flock64_t __user *arg32)
35 if (get_user(bf->l_type, &arg32->l_type) ||
36 get_user(bf->l_whence, &arg32->l_whence) ||
37 get_user(bf->l_start, &arg32->l_start) ||
38 get_user(bf->l_len, &arg32->l_len) ||
39 get_user(bf->l_sysid, &arg32->l_sysid) ||
40 get_user(bf->l_pid, &arg32->l_pid) ||
41 copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
/*
 * Compat XFS_IOC_FSGEOMETRY_V1: build the native geometry structure
 * (version 3) and copy out only sizeof(compat_xfs_fsop_geom_v1) bytes
 * of it — the 32-bit variant differs from the native one only by
 * trailing padding, so a truncated copy is layout-correct.
 */
47 xfs_compat_ioc_fsgeometry_v1(
49 compat_xfs_fsop_geom_v1_t __user *arg32)
51 struct xfs_fsop_geom fsgeo;
53 xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
54 /* The 32-bit variant simply has some padding at the end */
55 if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
/*
 * Copy a compat xfs_growfs_data request in from userspace, one scalar
 * field at a time (newblocks, imaxpct).
 */
61 xfs_compat_growfs_data_copyin(
62 struct xfs_growfs_data *in,
63 compat_xfs_growfs_data_t __user *arg32)
65 if (get_user(in->newblocks, &arg32->newblocks) ||
66 get_user(in->imaxpct, &arg32->imaxpct))
/*
 * Copy a compat xfs_growfs_rt (realtime grow) request in from
 * userspace, one scalar field at a time (newblocks, extsize).
 */
72 xfs_compat_growfs_rt_copyin(
73 struct xfs_growfs_rt *in,
74 compat_xfs_growfs_rt_t __user *arg32)
76 if (get_user(in->newblocks, &arg32->newblocks) ||
77 get_user(in->extsize, &arg32->extsize))
/*
 * Format one inode-group record into the caller's user buffer using
 * the 32-bit compat layout, then advance the bulk-request cursor by
 * the compat record size so the next record lands after it.
 */
83 xfs_fsinumbers_fmt_compat(
84 struct xfs_ibulk *breq,
85 const struct xfs_inogrp *igrp)
87 struct compat_xfs_inogrp __user *p32 = breq->ubuffer;
89 if (put_user(igrp->xi_startino, &p32->xi_startino) ||
90 put_user(igrp->xi_alloccount, &p32->xi_alloccount) ||
91 put_user(igrp->xi_allocmask, &p32->xi_allocmask))
94 return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_inogrp));
/*
 * Arch without compat alignment problems (non-BROKEN_X86_ALIGNMENT
 * branch — the #else line itself is missing from this listing): the
 * native formatter is layout-compatible and can be reused directly.
 */
98 #define xfs_fsinumbers_fmt_compat xfs_fsinumbers_fmt
99 #endif /* BROKEN_X86_ALIGNMENT */
/*
 * Copy a compat xfs_bstime in from userspace.  tv_sec is only 32 bits
 * wide in the compat layout, so it is read into a temporary and then
 * widened by assignment into the native field.
 */
102 xfs_ioctl32_bstime_copyin(
103 xfs_bstime_t *bstime,
104 compat_xfs_bstime_t __user *bstime32)
106 compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */
108 if (get_user(sec32, &bstime32->tv_sec) ||
109 get_user(bstime->tv_nsec, &bstime32->tv_nsec))
111 bstime->tv_sec = sec32;
116 * struct xfs_bstat has differing alignment on intel, & bstime_t sizes
120 xfs_ioctl32_bstat_copyin(
121 struct xfs_bstat *bstat,
122 struct compat_xfs_bstat __user *bstat32)
124 if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
125 get_user(bstat->bs_mode, &bstat32->bs_mode) ||
126 get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
127 get_user(bstat->bs_uid, &bstat32->bs_uid) ||
128 get_user(bstat->bs_gid, &bstat32->bs_gid) ||
129 get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
130 get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
131 get_user(bstat->bs_size, &bstat32->bs_size) ||
132 xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
133 xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
134 xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
135 get_user(bstat->bs_blocks, &bstat32->bs_size) ||
136 get_user(bstat->bs_xflags, &bstat32->bs_size) ||
137 get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
138 get_user(bstat->bs_extents, &bstat32->bs_extents) ||
139 get_user(bstat->bs_gen, &bstat32->bs_gen) ||
140 get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
141 get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
142 get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
143 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
144 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
145 get_user(bstat->bs_aextents, &bstat32->bs_aextents))
150 /* XFS_IOC_FSBULKSTAT and friends */
/*
 * Store an xfs_bstime to userspace in compat layout; tv_sec must be
 * narrowed to the 32-bit compat width via sec32.
 * NOTE(review): the declaration and assignment of sec32 (and the
 * return statements) are missing from this listing — presumably
 * "sec32 = p->tv_sec;" precedes the put_user calls; confirm against
 * the full source.
 */
153 xfs_bstime_store_compat(
154 compat_xfs_bstime_t __user *p32,
155 const xfs_bstime_t *p)
160 if (put_user(sec32, &p32->tv_sec) ||
161 put_user(p->tv_nsec, &p32->tv_nsec))
166 /* Return 0 on success or positive error (to xfs_bulkstat()) */
/*
 * Bulkstat output formatter for compat callers: convert one new-style
 * struct xfs_bulkstat into the old struct xfs_bstat, copy it out to
 * the user buffer field by field in the 32-bit compat layout, then
 * advance the bulk-request cursor by the compat record size.
 */
168 xfs_fsbulkstat_one_fmt_compat(
169 struct xfs_ibulk *breq,
170 const struct xfs_bulkstat *bstat)
172 struct compat_xfs_bstat __user *p32 = breq->ubuffer;
173 struct xfs_bstat bs1;
174 struct xfs_bstat *buffer = &bs1;
/* Translate the new bulkstat record into the legacy bstat layout. */
176 xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
178 if (put_user(buffer->bs_ino, &p32->bs_ino) ||
179 put_user(buffer->bs_mode, &p32->bs_mode) ||
180 put_user(buffer->bs_nlink, &p32->bs_nlink) ||
181 put_user(buffer->bs_uid, &p32->bs_uid) ||
182 put_user(buffer->bs_gid, &p32->bs_gid) ||
183 put_user(buffer->bs_rdev, &p32->bs_rdev) ||
184 put_user(buffer->bs_blksize, &p32->bs_blksize) ||
185 put_user(buffer->bs_size, &p32->bs_size) ||
186 xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
187 xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
188 xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
189 put_user(buffer->bs_blocks, &p32->bs_blocks) ||
190 put_user(buffer->bs_xflags, &p32->bs_xflags) ||
191 put_user(buffer->bs_extsize, &p32->bs_extsize) ||
192 put_user(buffer->bs_extents, &p32->bs_extents) ||
193 put_user(buffer->bs_gen, &p32->bs_gen) ||
194 put_user(buffer->bs_projid, &p32->bs_projid) ||
195 put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
196 put_user(buffer->bs_forkoff, &p32->bs_forkoff) ||
197 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
198 put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
199 put_user(buffer->bs_aextents, &p32->bs_aextents))
202 return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_bstat));
205 /* copied from xfs_ioctl.c */
/*
 * Compat handler for XFS_IOC_FSBULKSTAT(_SINGLE)/FSINUMBERS: copy the
 * 32-bit bulk request in (widening its pointer members), pick the
 * right output formatter, run the requested walk, then copy lastip
 * and ocount back to userspace.  Requires CAP_SYS_ADMIN and refuses a
 * shut-down filesystem.
 * NOTE(review): many lines (declarations of lastino/addr/error, the
 * error returns, closing braces) are missing from this listing.
 */
207 xfs_compat_ioc_fsbulkstat(
210 struct compat_xfs_fsop_bulkreq __user *p32)
213 struct xfs_fsop_bulkreq bulkreq;
214 struct xfs_ibulk breq = {
222 * Output structure handling functions. Depending on the command,
223 * either the xfs_bstat and xfs_inogrp structures are written out
224 * to userpace memory via bulkreq.ubuffer. Normally the compat
225 * functions and structure size are the correct ones to use ...
227 inumbers_fmt_pf inumbers_func = xfs_fsinumbers_fmt_compat;
228 bulkstat_one_fmt_pf bs_one_func = xfs_fsbulkstat_one_fmt_compat;
230 #ifdef CONFIG_X86_X32
231 if (in_x32_syscall()) {
233 * ... but on x32 the input xfs_fsop_bulkreq has pointers
234 * which must be handled in the "compat" (32-bit) way, while
235 * the xfs_bstat and xfs_inogrp structures follow native 64-
236 * bit layout convention. So adjust accordingly, otherwise
237 * the data written out in compat layout will not match what
238 * x32 userspace expects.
240 inumbers_func = xfs_fsinumbers_fmt;
241 bs_one_func = xfs_fsbulkstat_one_fmt;
245 /* done = 1 if there are more stats to get and if bulkstat */
246 /* should be called again (unused here, but used in dmapi) */
248 if (!capable(CAP_SYS_ADMIN))
251 if (XFS_FORCED_SHUTDOWN(mp))
/* Pull the 32-bit request apart, widening each pointer member. */
254 if (get_user(addr, &p32->lastip))
256 bulkreq.lastip = compat_ptr(addr);
257 if (get_user(bulkreq.icount, &p32->icount) ||
258 get_user(addr, &p32->ubuffer))
260 bulkreq.ubuffer = compat_ptr(addr);
261 if (get_user(addr, &p32->ocount))
263 bulkreq.ocount = compat_ptr(addr);
265 if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
268 if (bulkreq.icount <= 0)
271 if (bulkreq.ubuffer == NULL)
274 breq.ubuffer = bulkreq.ubuffer;
275 breq.icount = bulkreq.icount;
278 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
279 * that we want to stat. However, FSINUMBERS and FSBULKSTAT expect
280 * that *lastip contains either zero or the number of the last inode to
281 * be examined by the previous call and return results starting with
282 * the next inode after that. The new bulk request back end functions
283 * take the inode to start with, so we have to compute the startino
284 * parameter from lastino to maintain correct function. lastino == 0
285 * is a special case because it has traditionally meant "first inode
288 if (cmd == XFS_IOC_FSINUMBERS_32) {
289 breq.startino = lastino ? lastino + 1 : 0;
290 error = xfs_inumbers(&breq, inumbers_func);
291 lastino = breq.startino - 1;
292 } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
293 breq.startino = lastino;
295 error = xfs_bulkstat_one(&breq, bs_one_func);
296 lastino = breq.startino;
297 } else if (cmd == XFS_IOC_FSBULKSTAT_32) {
298 breq.startino = lastino ? lastino + 1 : 0;
299 error = xfs_bulkstat(&breq, bs_one_func);
300 lastino = breq.startino - 1;
/* Report progress back to the caller (both outputs are optional). */
307 if (bulkreq.lastip != NULL &&
308 copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
311 if (bulkreq.ocount != NULL &&
312 copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
/*
 * Copy a compat xfs_fsop_handlereq in from userspace and translate it
 * to the native form, widening each 32-bit pointer member (path,
 * ihandle, ohandle, ohandlen) via compat_ptr().
 */
319 xfs_compat_handlereq_copyin(
320 xfs_fsop_handlereq_t *hreq,
321 compat_xfs_fsop_handlereq_t __user *arg32)
323 compat_xfs_fsop_handlereq_t hreq32;
325 if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
328 hreq->fd = hreq32.fd;
329 hreq->path = compat_ptr(hreq32.path);
330 hreq->oflags = hreq32.oflags;
331 hreq->ihandle = compat_ptr(hreq32.ihandle);
332 hreq->ihandlen = hreq32.ihandlen;
333 hreq->ohandle = compat_ptr(hreq32.ohandle);
334 hreq->ohandlen = compat_ptr(hreq32.ohandlen);
/*
 * Resolve the input handle of an (already copied-in) compat handle
 * request to a dentry, widening the 32-bit ihandle pointer first.
 * Passes through whatever xfs_handle_to_dentry() returns — callers
 * below treat failures as ERR_PTR values (see their PTR_ERR use).
 */
339 STATIC struct dentry *
340 xfs_compat_handlereq_to_dentry(
341 struct file *parfilp,
342 compat_xfs_fsop_handlereq_t *hreq)
344 return xfs_handle_to_dentry(parfilp,
345 compat_ptr(hreq->ihandle), hreq->ihandlen);
/*
 * Compat XFS_IOC_ATTRLIST_BY_HANDLE: validate the request (privilege,
 * buffer-length bounds, namespace-only flags), resolve the handle to
 * a dentry, list extended attributes into a kernel buffer, then copy
 * the updated cursor and the listing back to userspace.
 * NOTE(review): the error returns, kbuf declaration/free and dentry
 * release lines are missing from this listing.
 */
349 xfs_compat_attrlist_by_handle(
350 struct file *parfilp,
354 attrlist_cursor_kern_t *cursor;
355 compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
356 compat_xfs_fsop_attrlist_handlereq_t al_hreq;
357 struct dentry *dentry;
360 if (!capable(CAP_SYS_ADMIN))
362 if (copy_from_user(&al_hreq, arg,
363 sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
365 if (al_hreq.buflen < sizeof(struct attrlist) ||
366 al_hreq.buflen > XFS_XATTR_LIST_MAX)
370 * Reject flags, only allow namespaces.
372 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
375 dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
377 return PTR_ERR(dentry);
380 kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
/* The cursor lives inside the copied-in request; update it in place. */
384 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
385 error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
386 al_hreq.flags, cursor);
390 if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
395 if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
/*
 * Compat XFS_IOC_ATTRMULTI_BY_HANDLE: perform a batch of extended-
 * attribute get/set/remove operations against a handle-resolved
 * inode.  The per-op array is bounded (opcount overflow check plus a
 * 16-page size cap), duplicated from userspace, executed with the
 * per-op result stored in am_error, and copied back at the end.
 * NOTE(review): error returns, case labels (GET/SET/REMOVE), braces
 * and the kfree/dput cleanup lines are missing from this listing.
 */
406 xfs_compat_attrmulti_by_handle(
407 struct file *parfilp,
411 compat_xfs_attr_multiop_t *ops;
412 compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
413 struct dentry *dentry;
414 unsigned int i, size;
415 unsigned char *attr_name;
417 if (!capable(CAP_SYS_ADMIN))
419 if (copy_from_user(&am_hreq, arg,
420 sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
/* Guard the opcount * sizeof multiplication below against overflow. */
424 if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
427 dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
429 return PTR_ERR(dentry);
432 size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
433 if (!size || size > 16 * PAGE_SIZE)
436 ops = memdup_user(compat_ptr(am_hreq.ops), size);
438 error = PTR_ERR(ops);
443 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
448 for (i = 0; i < am_hreq.opcount; i++) {
/* Pull in the attribute name; 0 or MAXNAMELEN means bad length. */
449 ops[i].am_error = strncpy_from_user((char *)attr_name,
450 compat_ptr(ops[i].am_attrname),
452 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
454 if (ops[i].am_error < 0)
457 switch (ops[i].am_opcode) {
459 ops[i].am_error = xfs_attrmulti_attr_get(
460 d_inode(dentry), attr_name,
461 compat_ptr(ops[i].am_attrvalue),
462 &ops[i].am_length, ops[i].am_flags);
/* Mutating ops take (and always drop) write access on the mount. */
465 ops[i].am_error = mnt_want_write_file(parfilp);
468 ops[i].am_error = xfs_attrmulti_attr_set(
469 d_inode(dentry), attr_name,
470 compat_ptr(ops[i].am_attrvalue),
471 ops[i].am_length, ops[i].am_flags);
472 mnt_drop_write_file(parfilp);
475 ops[i].am_error = mnt_want_write_file(parfilp);
478 ops[i].am_error = xfs_attrmulti_attr_remove(
479 d_inode(dentry), attr_name,
481 mnt_drop_write_file(parfilp);
484 ops[i].am_error = -EINVAL;
/* Copy per-op results (am_error, am_length) back to the caller. */
488 if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
/*
 * Compat XFS_IOC_FSSETDM_BY_HANDLE: set DMAPI attributes (event mask
 * and state) on a handle-resolved inode.  Requires CAP_MKNOD and is
 * refused on immutable or append-only inodes.
 * NOTE(review): error returns, the dput cleanup and the tail of the
 * xfs_set_dmattrs() call are missing from this listing.
 */
500 xfs_compat_fssetdm_by_handle(
501 struct file *parfilp,
505 struct fsdmidata fsd;
506 compat_xfs_fsop_setdm_handlereq_t dmhreq;
507 struct dentry *dentry;
509 if (!capable(CAP_MKNOD))
511 if (copy_from_user(&dmhreq, arg,
512 sizeof(compat_xfs_fsop_setdm_handlereq_t)))
515 dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
517 return PTR_ERR(dentry);
519 if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
524 if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
529 error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
538 xfs_file_compat_ioctl(
543 struct inode *inode = file_inode(filp);
544 struct xfs_inode *ip = XFS_I(inode);
545 struct xfs_mount *mp = ip->i_mount;
546 void __user *arg = (void __user *)p;
549 trace_xfs_file_compat_ioctl(ip);
552 /* No size or alignment issues on any arch */
553 case XFS_IOC_DIOINFO:
554 case XFS_IOC_FSGEOMETRY_V4:
555 case XFS_IOC_FSGEOMETRY:
556 case XFS_IOC_AG_GEOMETRY:
557 case XFS_IOC_FSGETXATTR:
558 case XFS_IOC_FSSETXATTR:
559 case XFS_IOC_FSGETXATTRA:
560 case XFS_IOC_FSSETDM:
561 case XFS_IOC_GETBMAP:
562 case XFS_IOC_GETBMAPA:
563 case XFS_IOC_GETBMAPX:
564 case XFS_IOC_FSCOUNTS:
565 case XFS_IOC_SET_RESBLKS:
566 case XFS_IOC_GET_RESBLKS:
567 case XFS_IOC_FSGROWFSLOG:
568 case XFS_IOC_GOINGDOWN:
569 case XFS_IOC_ERROR_INJECTION:
570 case XFS_IOC_ERROR_CLEARALL:
571 case FS_IOC_GETFSMAP:
572 case XFS_IOC_SCRUB_METADATA:
573 return xfs_file_ioctl(filp, cmd, p);
574 #if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
576 * These are handled fine if no alignment issues. To support x32
577 * which uses native 64-bit alignment we must emit these cases in
578 * addition to the ia-32 compat set below.
580 case XFS_IOC_ALLOCSP:
583 case XFS_IOC_UNRESVSP:
584 case XFS_IOC_ALLOCSP64:
585 case XFS_IOC_FREESP64:
586 case XFS_IOC_RESVSP64:
587 case XFS_IOC_UNRESVSP64:
588 case XFS_IOC_FSGEOMETRY_V1:
589 case XFS_IOC_FSGROWFSDATA:
590 case XFS_IOC_FSGROWFSRT:
591 case XFS_IOC_ZERO_RANGE:
592 #ifdef CONFIG_X86_X32
594 * x32 special: this gets a different cmd number from the ia-32 compat
595 * case below; the associated data will match native 64-bit alignment.
597 case XFS_IOC_SWAPEXT:
599 return xfs_file_ioctl(filp, cmd, p);
601 #if defined(BROKEN_X86_ALIGNMENT)
602 case XFS_IOC_ALLOCSP_32:
603 case XFS_IOC_FREESP_32:
604 case XFS_IOC_ALLOCSP64_32:
605 case XFS_IOC_FREESP64_32:
606 case XFS_IOC_RESVSP_32:
607 case XFS_IOC_UNRESVSP_32:
608 case XFS_IOC_RESVSP64_32:
609 case XFS_IOC_UNRESVSP64_32:
610 case XFS_IOC_ZERO_RANGE_32: {
611 struct xfs_flock64 bf;
613 if (xfs_compat_flock64_copyin(&bf, arg))
615 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
616 return xfs_ioc_space(filp, cmd, &bf);
618 case XFS_IOC_FSGEOMETRY_V1_32:
619 return xfs_compat_ioc_fsgeometry_v1(mp, arg);
620 case XFS_IOC_FSGROWFSDATA_32: {
621 struct xfs_growfs_data in;
623 if (xfs_compat_growfs_data_copyin(&in, arg))
625 error = mnt_want_write_file(filp);
628 error = xfs_growfs_data(mp, &in);
629 mnt_drop_write_file(filp);
632 case XFS_IOC_FSGROWFSRT_32: {
633 struct xfs_growfs_rt in;
635 if (xfs_compat_growfs_rt_copyin(&in, arg))
637 error = mnt_want_write_file(filp);
640 error = xfs_growfs_rt(mp, &in);
641 mnt_drop_write_file(filp);
645 /* long changes size, but xfs only copies out 32 bits */
646 case XFS_IOC_GETXFLAGS_32:
647 case XFS_IOC_SETXFLAGS_32:
648 case XFS_IOC_GETVERSION_32:
649 cmd = _NATIVE_IOC(cmd, long);
650 return xfs_file_ioctl(filp, cmd, p);
651 case XFS_IOC_SWAPEXT_32: {
652 struct xfs_swapext sxp;
653 struct compat_xfs_swapext __user *sxu = arg;
655 /* Bulk copy in up to the sx_stat field, then copy bstat */
656 if (copy_from_user(&sxp, sxu,
657 offsetof(struct xfs_swapext, sx_stat)) ||
658 xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
660 error = mnt_want_write_file(filp);
663 error = xfs_ioc_swapext(&sxp);
664 mnt_drop_write_file(filp);
667 case XFS_IOC_FSBULKSTAT_32:
668 case XFS_IOC_FSBULKSTAT_SINGLE_32:
669 case XFS_IOC_FSINUMBERS_32:
670 return xfs_compat_ioc_fsbulkstat(mp, cmd, arg);
671 case XFS_IOC_FD_TO_HANDLE_32:
672 case XFS_IOC_PATH_TO_HANDLE_32:
673 case XFS_IOC_PATH_TO_FSHANDLE_32: {
674 struct xfs_fsop_handlereq hreq;
676 if (xfs_compat_handlereq_copyin(&hreq, arg))
678 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
679 return xfs_find_handle(cmd, &hreq);
681 case XFS_IOC_OPEN_BY_HANDLE_32: {
682 struct xfs_fsop_handlereq hreq;
684 if (xfs_compat_handlereq_copyin(&hreq, arg))
686 return xfs_open_by_handle(filp, &hreq);
688 case XFS_IOC_READLINK_BY_HANDLE_32: {
689 struct xfs_fsop_handlereq hreq;
691 if (xfs_compat_handlereq_copyin(&hreq, arg))
693 return xfs_readlink_by_handle(filp, &hreq);
695 case XFS_IOC_ATTRLIST_BY_HANDLE_32:
696 return xfs_compat_attrlist_by_handle(filp, arg);
697 case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
698 return xfs_compat_attrmulti_by_handle(filp, arg);
699 case XFS_IOC_FSSETDM_BY_HANDLE_32:
700 return xfs_compat_fssetdm_by_handle(filp, arg);