// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/mount.h>
#include <linux/fsmap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"

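/*
 * Rebuild the native ioctl number for a compat command: keep the direction,
 * type and number bits, but encode the size of the native structure instead
 * of the compat one.
 */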
#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))

#ifdef BROKEN_X86_ALIGNMENT
STATIC int
xfs_compat_flock64_copyin(
	struct xfs_flock64	*bf,
	compat_xfs_flock64_t	__user *arg32)
{
	if (get_user(bf->l_type, &arg32->l_type) ||
	    get_user(bf->l_whence, &arg32->l_whence) ||
	    get_user(bf->l_start, &arg32->l_start) ||
	    get_user(bf->l_len, &arg32->l_len) ||
	    get_user(bf->l_sysid, &arg32->l_sysid) ||
	    get_user(bf->l_pid, &arg32->l_pid) ||
	    copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
		return -EFAULT;
	return 0;
}

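/*
 * Fill in the native geometry structure and copy out only as much of it as
 * the 32-bit v1 layout expects; apart from trailing padding the two match.
 */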
STATIC int
xfs_compat_ioc_fsgeometry_v1(
	struct xfs_mount	*mp,
	compat_xfs_fsop_geom_v1_t __user *arg32)
{
	struct xfs_fsop_geom	fsgeo;

	xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
	/* The 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_compat_growfs_data_copyin(
	struct xfs_growfs_data	*in,
	compat_xfs_growfs_data_t __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->imaxpct, &arg32->imaxpct))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_compat_growfs_rt_copyin(
	struct xfs_growfs_rt	*in,
	compat_xfs_growfs_rt_t	__user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->extsize, &arg32->extsize))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_fsinumbers_fmt_compat(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*ig)
{
	struct compat_xfs_inogrp __user	*p32 = breq->ubuffer;
	struct xfs_inogrp		ig1;
	struct xfs_inogrp		*igrp = &ig1;

	xfs_inumbers_to_inogrp(&ig1, ig);

	if (put_user(igrp->xi_startino, &p32->xi_startino) ||
	    put_user(igrp->xi_alloccount, &p32->xi_alloccount) ||
	    put_user(igrp->xi_allocmask, &p32->xi_allocmask))
		return -EFAULT;

	return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_inogrp));
}

#else
#define xfs_fsinumbers_fmt_compat xfs_fsinumbers_fmt
#endif /* BROKEN_X86_ALIGNMENT */

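/*
 * Copy a compat timestamp in from userspace, widening the 32-bit tv_sec to
 * the kernel's native type.
 */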
STATIC int
xfs_ioctl32_bstime_copyin(
	xfs_bstime_t		*bstime,
	compat_xfs_bstime_t	__user *bstime32)
{
	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */

	if (get_user(sec32, &bstime32->tv_sec) ||
	    get_user(bstime->tv_nsec, &bstime32->tv_nsec))
		return -EFAULT;
	bstime->tv_sec = sec32;
	return 0;
}

/*
 * struct xfs_bstat has differing alignment on Intel, and differing
 * bstime_t sizes everywhere.
 */
STATIC int
xfs_ioctl32_bstat_copyin(
	struct xfs_bstat		*bstat,
	struct compat_xfs_bstat __user	*bstat32)
{
	if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
	    get_user(bstat->bs_mode, &bstat32->bs_mode) ||
	    get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
	    get_user(bstat->bs_uid, &bstat32->bs_uid) ||
	    get_user(bstat->bs_gid, &bstat32->bs_gid) ||
	    get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
	    get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
	    get_user(bstat->bs_size, &bstat32->bs_size) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
	    get_user(bstat->bs_blocks, &bstat32->bs_blocks) ||
	    get_user(bstat->bs_xflags, &bstat32->bs_xflags) ||
	    get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
	    get_user(bstat->bs_extents, &bstat32->bs_extents) ||
	    get_user(bstat->bs_gen, &bstat32->bs_gen) ||
	    get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
	    get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
	    get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
	    get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
		return -EFAULT;
	return 0;
}

/* XFS_IOC_FSBULKSTAT and friends */

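/*
 * Copy a native timestamp out to the compat layout, narrowing tv_sec to
 * 32 bits.
 */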
STATIC int
xfs_bstime_store_compat(
	compat_xfs_bstime_t	__user *p32,
	const xfs_bstime_t	*p)
{
	__s32			sec32;

	sec32 = p->tv_sec;
	if (put_user(sec32, &p32->tv_sec) ||
	    put_user(p->tv_nsec, &p32->tv_nsec))
		return -EFAULT;
	return 0;
}

/* Return 0 on success or positive error (to xfs_bulkstat()) */
STATIC int
xfs_fsbulkstat_one_fmt_compat(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	struct compat_xfs_bstat __user	*p32 = breq->ubuffer;
	struct xfs_bstat		bs1;
	struct xfs_bstat		*buffer = &bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);

	if (put_user(buffer->bs_ino, &p32->bs_ino) ||
	    put_user(buffer->bs_mode, &p32->bs_mode) ||
	    put_user(buffer->bs_nlink, &p32->bs_nlink) ||
	    put_user(buffer->bs_uid, &p32->bs_uid) ||
	    put_user(buffer->bs_gid, &p32->bs_gid) ||
	    put_user(buffer->bs_rdev, &p32->bs_rdev) ||
	    put_user(buffer->bs_blksize, &p32->bs_blksize) ||
	    put_user(buffer->bs_size, &p32->bs_size) ||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks, &p32->bs_blocks) ||
	    put_user(buffer->bs_xflags, &p32->bs_xflags) ||
	    put_user(buffer->bs_extsize, &p32->bs_extsize) ||
	    put_user(buffer->bs_extents, &p32->bs_extents) ||
	    put_user(buffer->bs_gen, &p32->bs_gen) ||
	    put_user(buffer->bs_projid, &p32->bs_projid) ||
	    put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
	    put_user(buffer->bs_forkoff, &p32->bs_forkoff) ||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
	    put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return -EFAULT;

	return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_bstat));
}

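/*
 * Handle the 32-bit XFS_IOC_FSBULKSTAT, XFS_IOC_FSBULKSTAT_SINGLE and
 * XFS_IOC_FSINUMBERS requests by translating the compat bulk request and
 * feeding it to the native bulkstat/inumbers back end.
 */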
/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_fsbulkstat(
	struct xfs_mount	*mp,
	unsigned int		cmd,
	struct compat_xfs_fsop_bulkreq __user *p32)
{
	u32			addr;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/*
	 * Output structure handling functions.  Depending on the command,
	 * either the xfs_bstat and xfs_inogrp structures are written out
	 * to userspace memory via bulkreq.ubuffer.  Normally the compat
	 * functions and structure size are the correct ones to use ...
	 */
	inumbers_fmt_pf		inumbers_func = xfs_fsinumbers_fmt_compat;
	bulkstat_one_fmt_pf	bs_one_func = xfs_fsbulkstat_one_fmt_compat;

#ifdef CONFIG_X86_X32
	if (in_x32_syscall()) {
		/*
		 * ... but on x32 the input xfs_fsop_bulkreq has pointers
		 * which must be handled in the "compat" (32-bit) way, while
		 * the xfs_bstat and xfs_inogrp structures follow native 64-
		 * bit layout convention.  So adjust accordingly, otherwise
		 * the data written out in compat layout will not match what
		 * x32 userspace expects.
		 */
		inumbers_func = xfs_fsinumbers_fmt;
		bs_one_func = xfs_fsbulkstat_one_fmt;
	}
#endif

	/*
	 * done = 1 if there are more stats to get and if bulkstat should be
	 * called again (unused here, but used in dmapi)
	 */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (get_user(addr, &p32->lastip))
		return -EFAULT;
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -EFAULT;
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -EFAULT;
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in the filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS_32) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, inumbers_func);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, bs_one_func);
		lastino = breq.startino;
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, bs_one_func);
		lastino = breq.startino - 1;
	} else {
		error = -EINVAL;
	}
	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}

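/*
 * Convert a compat handle request to the native layout, turning the 32-bit
 * user pointers into native pointers with compat_ptr().
 */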
STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -EFAULT;

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}

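/* Resolve the inode handle in a compat handle request to a dentry. */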
STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}

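/*
 * List extended attributes of the inode referenced by a compat handle
 * request and copy the attribute list and updated cursor back to userspace.
 */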
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/* Reject flags, only allow namespaces. */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -ENOMEM;
	kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
			      al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

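/*
 * Apply a batch of get/set/remove extended attribute operations to the
 * inode referenced by a compat handle request.
 */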
STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;
	unsigned char				*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = -EFAULT;

	kfree(attr_name);
out_kfree_ops:
	kfree(ops);
out_dput:
	dput(dentry);
	return error;
}

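/*
 * Set the DMAPI event mask and state on the inode referenced by a compat
 * handle request.
 */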
STATIC int
xfs_compat_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				fsd.fsd_dmstate);

out:
	dput(dentry);
	return error;
}

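/*
 * Top-level compat ioctl handler: translate commands whose 32-bit layout
 * differs from the native one and fall back to the native handler for
 * everything else.
 */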
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = compat_ptr(p);
	int			error;

	trace_xfs_file_compat_ioctl(ip);

	switch (cmd) {
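	/*
	 * On platforms with broken x86 alignment these structures are laid
	 * out differently in 32-bit userspace and need field-by-field
	 * translation.
	 */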
#if defined(BROKEN_X86_ALIGNMENT)
	case XFS_IOC_ALLOCSP_32:
	case XFS_IOC_FREESP_32:
	case XFS_IOC_ALLOCSP64_32:
	case XFS_IOC_FREESP64_32:
	case XFS_IOC_RESVSP_32:
	case XFS_IOC_UNRESVSP_32:
	case XFS_IOC_RESVSP64_32:
	case XFS_IOC_UNRESVSP64_32:
	case XFS_IOC_ZERO_RANGE_32: {
		struct xfs_flock64	bf;

		if (xfs_compat_flock64_copyin(&bf, arg))
			return -EFAULT;
		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}
#endif
	/* long changes size, but xfs only copies out 32 bits */
	case XFS_IOC_GETXFLAGS_32:
	case XFS_IOC_SETXFLAGS_32:
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT_32: {
		struct xfs_swapext	sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy in up to the sx_stat field, then copy bstat */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_fsbulkstat(mp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(filp, arg);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(filp, arg);
	case XFS_IOC_FSSETDM_BY_HANDLE_32:
		return xfs_compat_fssetdm_by_handle(filp, arg);
	default:
		/* try the native version */
		return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
	}
}