1 // SPDX-License-Identifier: GPL-2.0
3 * linux/fs/read_write.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
8 #include <linux/slab.h>
9 #include <linux/stat.h>
10 #include <linux/sched/xacct.h>
11 #include <linux/fcntl.h>
12 #include <linux/file.h>
13 #include <linux/uio.h>
14 #include <linux/fsnotify.h>
15 #include <linux/security.h>
16 #include <linux/export.h>
17 #include <linux/syscalls.h>
18 #include <linux/pagemap.h>
19 #include <linux/splice.h>
20 #include <linux/compat.h>
21 #include <linux/mount.h>
25 #include <linux/uaccess.h>
26 #include <asm/unistd.h>
28 const struct file_operations generic_ro_fops = {
29 .llseek = generic_file_llseek,
30 .read_iter = generic_file_read_iter,
31 .mmap = generic_file_readonly_mmap,
32 .splice_read = generic_file_splice_read,
35 EXPORT_SYMBOL(generic_ro_fops);
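/*
 * Illustrative sketch: how a hypothetical read-only, page-cache-backed
 * filesystem could hand its regular-file inodes to generic_ro_fops.  The
 * myfs_* name is an assumption made up for this example, not an existing API.
 */
static struct inode *myfs_new_file_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_mode = S_IFREG | 0444;
	inode->i_fop = &generic_ro_fops;	/* llseek, read_iter, mmap, splice */
	/* a real filesystem would also set i_mapping->a_ops, timestamps, ... */
	return inode;
}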
37 static inline bool unsigned_offsets(struct file *file)
39 return file->f_mode & FMODE_UNSIGNED_OFFSET;
43 * vfs_setpos - update the file offset for lseek
44 * @file: file structure in question
45 * @offset: file offset to seek to
46 * @maxsize: maximum file size
48 * This is a low-level filesystem helper for updating the file offset to
49 * the value specified by @offset if the given offset is valid and it is
50 * not equal to the current file offset.
52 * Return the specified offset on success and -EINVAL on invalid offset.
54 loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
56 if (offset < 0 && !unsigned_offsets(file))
61 if (offset != file->f_pos) {
67 EXPORT_SYMBOL(vfs_setpos);
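/*
 * Illustrative sketch: a custom ->llseek that only accepts absolute seeks to
 * record boundaries and lets vfs_setpos() do the range check and the f_pos
 * update.  The mydev_* names and MYDEV_* constants are hypothetical.
 */
#define MYDEV_RECORD_SIZE	512
#define MYDEV_MAX_BYTES		(1024 * MYDEV_RECORD_SIZE)

static loff_t mydev_llseek(struct file *file, loff_t offset, int whence)
{
	if (whence != SEEK_SET || (offset & (MYDEV_RECORD_SIZE - 1)))
		return -EINVAL;
	return vfs_setpos(file, offset, MYDEV_MAX_BYTES);
}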
70 * generic_file_llseek_size - generic llseek implementation for regular files
71 * @file: file structure to seek on
72 * @offset: file offset to seek to
73 * @whence: type of seek
74 * @maxsize: maximum size of this file in the file system
75 * @eof: offset used for SEEK_END position
77 * This is a variant of generic_file_llseek that allows passing in a custom
78 * maximum file size and a custom EOF position, for e.g. hashed directories
81 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
82 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
83 * read/writes behave like SEEK_SET against seeks.
86 generic_file_llseek_size(struct file *file, loff_t offset, int whence,
87 loff_t maxsize, loff_t eof)
95 * Here we special-case the lseek(fd, 0, SEEK_CUR)
96 * position-querying operation. Avoid rewriting the "same"
97 * f_pos value back to the file because a concurrent read(),
98 * write() or lseek() might have altered it
103 * f_lock protects against read/modify/write race with other
104 * SEEK_CURs. Note that parallel writes and reads behave
107 spin_lock(&file->f_lock);
108 offset = vfs_setpos(file, file->f_pos + offset, maxsize);
109 spin_unlock(&file->f_lock);
113 * In the generic case the entire file is data, so as long as
114 * offset isn't at the end of the file then the offset is data.
116 if ((unsigned long long)offset >= eof)
121 * There is a virtual hole at the end of the file, so as long as
122 * offset isn't i_size or larger, return i_size.
124 if ((unsigned long long)offset >= eof)
130 return vfs_setpos(file, offset, maxsize);
132 EXPORT_SYMBOL(generic_file_llseek_size);
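/*
 * Illustrative sketch: a directory ->llseek in the spirit described above,
 * passing a custom maximum offset and a custom EOF instead of the i_size
 * based defaults.  myfs_dir_llseek() and the 31-bit hash-cookie offset space
 * are assumptions made up for the example.
 */
static loff_t myfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	/* directory "offsets" are 31-bit hash cookies in this example */
	return generic_file_llseek_size(file, offset, whence,
					0x7fffffffULL, i_size_read(inode));
}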
135 * generic_file_llseek - generic llseek implementation for regular files
136 * @file: file structure to seek on
137 * @offset: file offset to seek to
138 * @whence: type of seek
140 * This is a generic implementation of ->llseek usable for all normal local
141 * filesystems. It just updates the file offset to the value specified by
142 * @offset and @whence.
144 loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
146 struct inode *inode = file->f_mapping->host;
148 return generic_file_llseek_size(file, offset, whence,
149 inode->i_sb->s_maxbytes,
152 EXPORT_SYMBOL(generic_file_llseek);
155 * fixed_size_llseek - llseek implementation for fixed-sized devices
156 * @file: file structure to seek on
157 * @offset: file offset to seek to
158 * @whence: type of seek
159 * @size: size of the file
162 loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
165 case SEEK_SET: case SEEK_CUR: case SEEK_END:
166 return generic_file_llseek_size(file, offset, whence,
172 EXPORT_SYMBOL(fixed_size_llseek);
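/*
 * Illustrative sketch: a character device of known, constant size can make
 * its ->llseek a one-liner on top of fixed_size_llseek().  The mychip_*
 * names and the EEPROM size are hypothetical.
 */
#define MYCHIP_EEPROM_SIZE	4096

static loff_t mychip_llseek(struct file *file, loff_t offset, int whence)
{
	return fixed_size_llseek(file, offset, whence, MYCHIP_EEPROM_SIZE);
}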
175 * no_seek_end_llseek - llseek implementation for devices that do not support SEEK_END
176 * @file: file structure to seek on
177 * @offset: file offset to seek to
178 * @whence: type of seek
181 loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
184 case SEEK_SET: case SEEK_CUR:
185 return generic_file_llseek_size(file, offset, whence,
191 EXPORT_SYMBOL(no_seek_end_llseek);
194 * no_seek_end_llseek_size - llseek implementation for devices that do not support SEEK_END
195 * @file: file structure to seek on
196 * @offset: file offset to seek to
197 * @whence: type of seek
198 * @size: maximal offset allowed
201 loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size)
204 case SEEK_SET: case SEEK_CUR:
205 return generic_file_llseek_size(file, offset, whence,
211 EXPORT_SYMBOL(no_seek_end_llseek_size);
214 * noop_llseek - No Operation Performed llseek implementation
215 * @file: file structure to seek on
216 * @offset: file offset to seek to
217 * @whence: type of seek
219 * This is an implementation of ->llseek usable for the rare special case when
220 * userspace expects the seek to succeed but the (device) file is actually not
221 * able to perform the seek. In this case you use noop_llseek() instead of
222 * falling back to the default implementation of ->llseek.
224 loff_t noop_llseek(struct file *file, loff_t offset, int whence)
228 EXPORT_SYMBOL(noop_llseek);
230 loff_t no_llseek(struct file *file, loff_t offset, int whence)
234 EXPORT_SYMBOL(no_llseek);
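/*
 * Illustrative sketch of the choice between the two helpers above: use
 * noop_llseek() when userspace expects lseek() to "succeed" even though the
 * position is meaningless, and no_llseek() when lseek() should fail with
 * -ESPIPE.  The mydev_* names are hypothetical.
 */
static ssize_t mydev_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	return 0;	/* hypothetical stub */
}

static const struct file_operations mydev_fops = {
	.read	= mydev_read,
	.llseek	= noop_llseek,	/* or no_llseek to reject seeking outright */
};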
236 loff_t default_llseek(struct file *file, loff_t offset, int whence)
238 struct inode *inode = file_inode(file);
244 offset += i_size_read(inode);
248 retval = file->f_pos;
251 offset += file->f_pos;
255 * In the generic case the entire file is data, so as
256 * long as offset isn't at the end of the file then the
259 if (offset >= inode->i_size) {
266 * There is a virtual hole at the end of the file, so
267 * as long as offset isn't i_size or larger, return
270 if (offset >= inode->i_size) {
274 offset = inode->i_size;
278 if (offset >= 0 || unsigned_offsets(file)) {
279 if (offset != file->f_pos) {
280 file->f_pos = offset;
289 EXPORT_SYMBOL(default_llseek);
291 loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
293 loff_t (*fn)(struct file *, loff_t, int);
296 if (file->f_mode & FMODE_LSEEK) {
297 if (file->f_op->llseek)
298 fn = file->f_op->llseek;
300 return fn(file, offset, whence);
302 EXPORT_SYMBOL(vfs_llseek);
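/*
 * Illustrative sketch: an in-kernel caller using vfs_llseek() on a file it
 * opened itself, e.g. to find the end of the file before appending.  For
 * regular files i_size_read() is usually preferable; this only shows the
 * calling convention.  mymod_* is a hypothetical name.
 */
static loff_t mymod_seek_to_end(struct file *file)
{
	return vfs_llseek(file, 0, SEEK_END);	/* negative errno on failure */
}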
304 static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
307 struct fd f = fdget_pos(fd);
312 if (whence <= SEEK_MAX) {
313 loff_t res = vfs_llseek(f.file, offset, whence);
315 if (res != (loff_t)retval)
316 retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
322 SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
324 return ksys_lseek(fd, offset, whence);
328 COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
330 return ksys_lseek(fd, offset, whence);
334 #if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
335 defined(__ARCH_WANT_SYS_LLSEEK)
336 SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
337 unsigned long, offset_low, loff_t __user *, result,
338 unsigned int, whence)
341 struct fd f = fdget_pos(fd);
348 if (whence > SEEK_MAX)
351 offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
354 retval = (int)offset;
357 if (!copy_to_user(result, &offset, sizeof(offset)))
366 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
368 if (unlikely((ssize_t) count < 0))
372 * ranged mandatory locking does not apply to streams - it makes sense
373 * only for files where position has a meaning.
378 if (unlikely(pos < 0)) {
379 if (!unsigned_offsets(file))
381 if (count >= -pos) /* both values are in 0..LLONG_MAX */
383 } else if (unlikely((loff_t) (pos + count) < 0)) {
384 if (!unsigned_offsets(file))
389 return security_file_permission(file,
390 read_write == READ ? MAY_READ : MAY_WRITE);
393 static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
395 struct iovec iov = { .iov_base = buf, .iov_len = len };
397 struct iov_iter iter;
400 init_sync_kiocb(&kiocb, filp);
401 kiocb.ki_pos = (ppos ? *ppos : 0);
402 iov_iter_init(&iter, READ, &iov, 1, len);
404 ret = call_read_iter(filp, &kiocb, &iter);
405 BUG_ON(ret == -EIOCBQUEUED);
407 *ppos = kiocb.ki_pos;
411 static int warn_unsupported(struct file *file, const char *op)
414 "kernel %s not supported for file %pD4 (pid: %d comm: %.20s)\n",
415 op, file, current->pid, current->comm);
419 ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
423 .iov_len = min_t(size_t, count, MAX_RW_COUNT),
426 struct iov_iter iter;
429 if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
431 if (!(file->f_mode & FMODE_CAN_READ))
434 * Also fail if ->read_iter and ->read are both wired up as that
435 * implies very convoluted semantics.
437 if (unlikely(!file->f_op->read_iter || file->f_op->read))
438 return warn_unsupported(file, "read");
440 init_sync_kiocb(&kiocb, file);
441 kiocb.ki_pos = pos ? *pos : 0;
442 iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len);
443 ret = file->f_op->read_iter(&kiocb, &iter);
447 fsnotify_access(file);
448 add_rchar(current, ret);
454 ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
458 ret = rw_verify_area(READ, file, pos, count);
461 return __kernel_read(file, buf, count, pos);
463 EXPORT_SYMBOL(kernel_read);
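/*
 * Illustrative sketch: reading a small header from a file the kernel opened
 * itself (e.g. via filp_open()), with the position tracked explicitly so
 * file->f_pos is left untouched.  mymod_read_magic() is a hypothetical name.
 */
static int mymod_read_magic(struct file *file, u32 *magic)
{
	loff_t pos = 0;
	ssize_t ret;

	ret = kernel_read(file, magic, sizeof(*magic), &pos);
	if (ret < 0)
		return ret;
	if (ret != sizeof(*magic))
		return -EIO;		/* short read */
	return 0;			/* pos now equals sizeof(*magic) */
}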
465 ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
469 if (!(file->f_mode & FMODE_READ))
471 if (!(file->f_mode & FMODE_CAN_READ))
473 if (unlikely(!access_ok(buf, count)))
476 ret = rw_verify_area(READ, file, pos, count);
479 if (count > MAX_RW_COUNT)
480 count = MAX_RW_COUNT;
482 if (file->f_op->read)
483 ret = file->f_op->read(file, buf, count, pos);
484 else if (file->f_op->read_iter)
485 ret = new_sync_read(file, buf, count, pos);
489 fsnotify_access(file);
490 add_rchar(current, ret);
496 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
498 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
500 struct iov_iter iter;
503 init_sync_kiocb(&kiocb, filp);
504 kiocb.ki_pos = (ppos ? *ppos : 0);
505 iov_iter_init(&iter, WRITE, &iov, 1, len);
507 ret = call_write_iter(filp, &kiocb, &iter);
508 BUG_ON(ret == -EIOCBQUEUED);
510 *ppos = kiocb.ki_pos;
514 /* caller is responsible for file_start_write/file_end_write */
515 ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
518 .iov_base = (void *)buf,
519 .iov_len = min_t(size_t, count, MAX_RW_COUNT),
522 struct iov_iter iter;
525 if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
527 if (!(file->f_mode & FMODE_CAN_WRITE))
530 * Also fail if ->write_iter and ->write are both wired up as that
531 * implies very convoluted semantics.
533 if (unlikely(!file->f_op->write_iter || file->f_op->write))
534 return warn_unsupported(file, "write");
536 init_sync_kiocb(&kiocb, file);
537 kiocb.ki_pos = pos ? *pos : 0;
538 iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
539 ret = file->f_op->write_iter(&kiocb, &iter);
543 fsnotify_modify(file);
544 add_wchar(current, ret);
550 * This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()",
551 * but autofs is one of the few internal kernel users that actually
552 * wants this _and_ can be built as a module. So we need to export
553 * this symbol for autofs, even though it really isn't appropriate
554 * for any other kernel modules.
556 EXPORT_SYMBOL_GPL(__kernel_write);
558 ssize_t kernel_write(struct file *file, const void *buf, size_t count,
563 ret = rw_verify_area(WRITE, file, pos, count);
567 file_start_write(file);
568 ret = __kernel_write(file, buf, count, pos);
569 file_end_write(file);
572 EXPORT_SYMBOL(kernel_write);
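/*
 * Illustrative sketch: kernel_write() may write fewer bytes than requested,
 * so callers that need the whole buffer written typically loop.  On success
 * *pos is advanced by the number of bytes written.  mymod_write_all() is a
 * hypothetical helper, not an existing API.
 */
static int mymod_write_all(struct file *file, const void *buf, size_t len,
			   loff_t *pos)
{
	const char *p = buf;

	while (len) {
		ssize_t ret = kernel_write(file, p, len, pos);

		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;	/* no progress, avoid looping forever */
		p += ret;
		len -= ret;
	}
	return 0;
}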
574 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
578 if (!(file->f_mode & FMODE_WRITE))
580 if (!(file->f_mode & FMODE_CAN_WRITE))
582 if (unlikely(!access_ok(buf, count)))
585 ret = rw_verify_area(WRITE, file, pos, count);
588 if (count > MAX_RW_COUNT)
589 count = MAX_RW_COUNT;
590 file_start_write(file);
591 if (file->f_op->write)
592 ret = file->f_op->write(file, buf, count, pos);
593 else if (file->f_op->write_iter)
594 ret = new_sync_write(file, buf, count, pos);
598 fsnotify_modify(file);
599 add_wchar(current, ret);
602 file_end_write(file);
606 /* file_ppos returns &file->f_pos, or NULL if the file is a stream */
607 static inline loff_t *file_ppos(struct file *file)
609 return file->f_mode & FMODE_STREAM ? NULL : &file->f_pos;
612 ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
614 struct fd f = fdget_pos(fd);
615 ssize_t ret = -EBADF;
618 loff_t pos, *ppos = file_ppos(f.file);
623 ret = vfs_read(f.file, buf, count, ppos);
624 if (ret >= 0 && ppos)
631 SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
633 return ksys_read(fd, buf, count);
636 ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
638 struct fd f = fdget_pos(fd);
639 ssize_t ret = -EBADF;
642 loff_t pos, *ppos = file_ppos(f.file);
647 ret = vfs_write(f.file, buf, count, ppos);
648 if (ret >= 0 && ppos)
656 SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
659 return ksys_write(fd, buf, count);
662 ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
666 ssize_t ret = -EBADF;
674 if (f.file->f_mode & FMODE_PREAD)
675 ret = vfs_read(f.file, buf, count, &pos);
682 SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
683 size_t, count, loff_t, pos)
685 return ksys_pread64(fd, buf, count, pos);
688 ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
689 size_t count, loff_t pos)
692 ssize_t ret = -EBADF;
700 if (f.file->f_mode & FMODE_PWRITE)
701 ret = vfs_write(f.file, buf, count, &pos);
708 SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
709 size_t, count, loff_t, pos)
711 return ksys_pwrite64(fd, buf, count, pos);
714 static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
715 loff_t *ppos, int type, rwf_t flags)
720 init_sync_kiocb(&kiocb, filp);
721 ret = kiocb_set_rw_flags(&kiocb, flags);
724 kiocb.ki_pos = (ppos ? *ppos : 0);
727 ret = call_read_iter(filp, &kiocb, iter);
729 ret = call_write_iter(filp, &kiocb, iter);
730 BUG_ON(ret == -EIOCBQUEUED);
732 *ppos = kiocb.ki_pos;
736 /* Do it by hand, with file-ops */
737 static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
738 loff_t *ppos, int type, rwf_t flags)
742 if (flags & ~RWF_HIPRI)
745 while (iov_iter_count(iter)) {
746 struct iovec iovec = iov_iter_iovec(iter);
750 nr = filp->f_op->read(filp, iovec.iov_base,
751 iovec.iov_len, ppos);
753 nr = filp->f_op->write(filp, iovec.iov_base,
754 iovec.iov_len, ppos);
763 if (nr != iovec.iov_len)
765 iov_iter_advance(iter, nr);
771 static ssize_t do_iter_read(struct file *file, struct iov_iter *iter,
772 loff_t *pos, rwf_t flags)
777 if (!(file->f_mode & FMODE_READ))
779 if (!(file->f_mode & FMODE_CAN_READ))
782 tot_len = iov_iter_count(iter);
785 ret = rw_verify_area(READ, file, pos, tot_len);
789 if (file->f_op->read_iter)
790 ret = do_iter_readv_writev(file, iter, pos, READ, flags);
792 ret = do_loop_readv_writev(file, iter, pos, READ, flags);
795 fsnotify_access(file);
799 ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
800 struct iov_iter *iter)
805 if (!file->f_op->read_iter)
807 if (!(file->f_mode & FMODE_READ))
809 if (!(file->f_mode & FMODE_CAN_READ))
812 tot_len = iov_iter_count(iter);
815 ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
819 ret = call_read_iter(file, iocb, iter);
822 fsnotify_access(file);
825 EXPORT_SYMBOL(vfs_iocb_iter_read);
827 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
830 if (!file->f_op->read_iter)
832 return do_iter_read(file, iter, ppos, flags);
834 EXPORT_SYMBOL(vfs_iter_read);
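/*
 * Illustrative sketch: a kernel-space caller wrapping a buffer in a kvec
 * iterator and reading through vfs_iter_read().  This only works for files
 * whose ->read_iter is implemented, as checked above.  mymod_read_kvec() is
 * a hypothetical helper; the write side mirrors it via vfs_iter_write().
 */
static ssize_t mymod_read_kvec(struct file *file, void *buf, size_t len,
			       loff_t *pos)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kv, 1, len);
	return vfs_iter_read(file, &iter, pos, 0);
}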
836 static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
837 loff_t *pos, rwf_t flags)
842 if (!(file->f_mode & FMODE_WRITE))
844 if (!(file->f_mode & FMODE_CAN_WRITE))
847 tot_len = iov_iter_count(iter);
850 ret = rw_verify_area(WRITE, file, pos, tot_len);
854 if (file->f_op->write_iter)
855 ret = do_iter_readv_writev(file, iter, pos, WRITE, flags);
857 ret = do_loop_readv_writev(file, iter, pos, WRITE, flags);
859 fsnotify_modify(file);
863 ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
864 struct iov_iter *iter)
869 if (!file->f_op->write_iter)
871 if (!(file->f_mode & FMODE_WRITE))
873 if (!(file->f_mode & FMODE_CAN_WRITE))
876 tot_len = iov_iter_count(iter);
879 ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
883 ret = call_write_iter(file, iocb, iter);
885 fsnotify_modify(file);
889 EXPORT_SYMBOL(vfs_iocb_iter_write);
891 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
894 if (!file->f_op->write_iter)
896 return do_iter_write(file, iter, ppos, flags);
898 EXPORT_SYMBOL(vfs_iter_write);
900 static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
901 unsigned long vlen, loff_t *pos, rwf_t flags)
903 struct iovec iovstack[UIO_FASTIOV];
904 struct iovec *iov = iovstack;
905 struct iov_iter iter;
908 ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
910 ret = do_iter_read(file, &iter, pos, flags);
917 static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
918 unsigned long vlen, loff_t *pos, rwf_t flags)
920 struct iovec iovstack[UIO_FASTIOV];
921 struct iovec *iov = iovstack;
922 struct iov_iter iter;
925 ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
927 file_start_write(file);
928 ret = do_iter_write(file, &iter, pos, flags);
929 file_end_write(file);
935 static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
936 unsigned long vlen, rwf_t flags)
938 struct fd f = fdget_pos(fd);
939 ssize_t ret = -EBADF;
942 loff_t pos, *ppos = file_ppos(f.file);
947 ret = vfs_readv(f.file, vec, vlen, ppos, flags);
948 if (ret >= 0 && ppos)
954 add_rchar(current, ret);
959 static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
960 unsigned long vlen, rwf_t flags)
962 struct fd f = fdget_pos(fd);
963 ssize_t ret = -EBADF;
966 loff_t pos, *ppos = file_ppos(f.file);
971 ret = vfs_writev(f.file, vec, vlen, ppos, flags);
972 if (ret >= 0 && ppos)
978 add_wchar(current, ret);
983 static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
985 #define HALF_LONG_BITS (BITS_PER_LONG / 2)
986 return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
989 static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
990 unsigned long vlen, loff_t pos, rwf_t flags)
993 ssize_t ret = -EBADF;
1001 if (f.file->f_mode & FMODE_PREAD)
1002 ret = vfs_readv(f.file, vec, vlen, &pos, flags);
1007 add_rchar(current, ret);
1012 static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
1013 unsigned long vlen, loff_t pos, rwf_t flags)
1016 ssize_t ret = -EBADF;
1024 if (f.file->f_mode & FMODE_PWRITE)
1025 ret = vfs_writev(f.file, vec, vlen, &pos, flags);
1030 add_wchar(current, ret);
1035 SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
1036 unsigned long, vlen)
1038 return do_readv(fd, vec, vlen, 0);
1041 SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
1042 unsigned long, vlen)
1044 return do_writev(fd, vec, vlen, 0);
1047 SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
1048 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
1050 loff_t pos = pos_from_hilo(pos_h, pos_l);
1052 return do_preadv(fd, vec, vlen, pos, 0);
1055 SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
1056 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
1059 loff_t pos = pos_from_hilo(pos_h, pos_l);
1062 return do_readv(fd, vec, vlen, flags);
1064 return do_preadv(fd, vec, vlen, pos, flags);
1067 SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
1068 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
1070 loff_t pos = pos_from_hilo(pos_h, pos_l);
1072 return do_pwritev(fd, vec, vlen, pos, 0);
1075 SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
1076 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
1079 loff_t pos = pos_from_hilo(pos_h, pos_l);
1082 return do_writev(fd, vec, vlen, flags);
1084 return do_pwritev(fd, vec, vlen, pos, flags);
1088 * Various compat syscalls. Note that they all pretend to take a native
1089 * iovec - import_iovec will properly treat those as compat_iovecs based on
1090 * in_compat_syscall().
1092 #ifdef CONFIG_COMPAT
1093 #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
1094 COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
1095 const struct iovec __user *, vec,
1096 unsigned long, vlen, loff_t, pos)
1098 return do_preadv(fd, vec, vlen, pos, 0);
1102 COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
1103 const struct iovec __user *, vec,
1104 compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
1106 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1108 return do_preadv(fd, vec, vlen, pos, 0);
1111 #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
1112 COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
1113 const struct iovec __user *, vec,
1114 unsigned long, vlen, loff_t, pos, rwf_t, flags)
1117 return do_readv(fd, vec, vlen, flags);
1118 return do_preadv(fd, vec, vlen, pos, flags);
1122 COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
1123 const struct iovec __user *, vec,
1124 compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
1127 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1130 return do_readv(fd, vec, vlen, flags);
1131 return do_preadv(fd, vec, vlen, pos, flags);
1134 #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
1135 COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
1136 const struct iovec __user *, vec,
1137 unsigned long, vlen, loff_t, pos)
1139 return do_pwritev(fd, vec, vlen, pos, 0);
1143 COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
1144 const struct iovec __user *, vec,
1145 compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
1147 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1149 return do_pwritev(fd, vec, vlen, pos, 0);
1152 #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
1153 COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
1154 const struct iovec __user *, vec,
1155 unsigned long, vlen, loff_t, pos, rwf_t, flags)
1158 return do_writev(fd, vec, vlen, flags);
1159 return do_pwritev(fd, vec, vlen, pos, flags);
1163 COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
1164 const struct iovec __user *, vec,
1165 compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags)
1167 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1170 return do_writev(fd, vec, vlen, flags);
1171 return do_pwritev(fd, vec, vlen, pos, flags);
1173 #endif /* CONFIG_COMPAT */
1175 static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1176 size_t count, loff_t max)
1179 struct inode *in_inode, *out_inode;
1180 struct pipe_inode_info *opipe;
1187 * Get the input file and verify that it is OK.
1193 if (!(in.file->f_mode & FMODE_READ))
1197 pos = in.file->f_pos;
1200 if (!(in.file->f_mode & FMODE_PREAD))
1203 retval = rw_verify_area(READ, in.file, &pos, count);
1206 if (count > MAX_RW_COUNT)
1207 count = MAX_RW_COUNT;
1210 * Get the output file and verify that it is OK.
1213 out = fdget(out_fd);
1216 if (!(out.file->f_mode & FMODE_WRITE))
1218 in_inode = file_inode(in.file);
1219 out_inode = file_inode(out.file);
1220 out_pos = out.file->f_pos;
1223 max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
1225 if (unlikely(pos + count > max)) {
1226 retval = -EOVERFLOW;
1235 * We need to debate whether we can enable this or not. The
1236 * man page documents EAGAIN return for the output at least,
1237 * and the application is arguably buggy if it doesn't expect
1238 * EAGAIN on a non-blocking file descriptor.
1240 if (in.file->f_flags & O_NONBLOCK)
1241 fl = SPLICE_F_NONBLOCK;
1243 opipe = get_pipe_info(out.file, true);
1245 retval = rw_verify_area(WRITE, out.file, &out_pos, count);
1248 file_start_write(out.file);
1249 retval = do_splice_direct(in.file, &pos, out.file, &out_pos,
1251 file_end_write(out.file);
1253 if (out.file->f_flags & O_NONBLOCK)
1254 fl |= SPLICE_F_NONBLOCK;
1256 retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
1260 add_rchar(current, retval);
1261 add_wchar(current, retval);
1262 fsnotify_access(in.file);
1263 fsnotify_modify(out.file);
1264 out.file->f_pos = out_pos;
1268 in.file->f_pos = pos;
1274 retval = -EOVERFLOW;
1284 SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
1291 if (unlikely(get_user(off, offset)))
1294 ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1295 if (unlikely(put_user(pos, offset)))
1300 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1303 SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
1309 if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1311 ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1312 if (unlikely(put_user(pos, offset)))
1317 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1320 #ifdef CONFIG_COMPAT
1321 COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
1322 compat_off_t __user *, offset, compat_size_t, count)
1329 if (unlikely(get_user(off, offset)))
1332 ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1333 if (unlikely(put_user(pos, offset)))
1338 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1341 COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
1342 compat_loff_t __user *, offset, compat_size_t, count)
1348 if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1350 ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1351 if (unlikely(put_user(pos, offset)))
1356 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1361 * generic_copy_file_range - copy data between two files
1362 * @file_in: file structure to read from
1363 * @pos_in: file offset to read from
1364 * @file_out: file structure to write data to
1365 * @pos_out: file offset to write data to
1366 * @len: amount of data to copy
1367 * @flags: copy flags
1369 * This is a generic filesystem helper to copy data from one file to another.
1370 * It has no constraints on the source or destination file owners - the files
1371 * can belong to different superblocks and different filesystem types. Short
1372 * copies are allowed.
1374 * This should be called from the @file_out filesystem, as per the
1375 * ->copy_file_range() method.
1377 * Returns the number of bytes copied or a negative error indicating the
1381 ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
1382 struct file *file_out, loff_t pos_out,
1383 size_t len, unsigned int flags)
1385 return do_splice_direct(file_in, &pos_in, file_out, &pos_out,
1386 len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);
1388 EXPORT_SYMBOL(generic_copy_file_range);
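/*
 * Illustrative sketch: a network filesystem's ->copy_file_range() that tries
 * an offloaded copy first and falls back to generic_copy_file_range() when
 * the server cannot do it.  The myfs_* names are hypothetical; real
 * implementations (nfs, cifs, ...) differ in detail.
 */
static ssize_t myfs_try_server_copy(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    size_t len)
{
	return -EOPNOTSUPP;	/* hypothetical stub: no offload available */
}

static ssize_t myfs_copy_file_range(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = myfs_try_server_copy(file_in, pos_in, file_out, pos_out, len);
	if (ret == -EOPNOTSUPP)
		ret = generic_copy_file_range(file_in, pos_in, file_out,
					      pos_out, len, flags);
	return ret;
}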
1391 * Performs necessary checks before doing a file copy
1393 * Can adjust the number of bytes to copy via the @req_count argument.
1394 * Returns the appropriate error code that the caller should return, or
1395 * zero in case the copy should be allowed.
1397 static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
1398 struct file *file_out, loff_t pos_out,
1399 size_t *req_count, unsigned int flags)
1401 struct inode *inode_in = file_inode(file_in);
1402 struct inode *inode_out = file_inode(file_out);
1403 uint64_t count = *req_count;
1407 ret = generic_file_rw_checks(file_in, file_out);
1412 * We allow some filesystems to handle cross-sb copies, but passing
1413 * a file of the wrong filesystem type to a filesystem driver can result
1414 * in an attempt to dereference the wrong type of ->private_data, so
1415 * avoid doing that until we really have a good reason.
1417 * nfs and cifs define several different file_system_type structures
1418 * and several different sets of file_operations, but they all end up
1419 * using the same ->copy_file_range() function pointer.
1421 if (file_out->f_op->copy_file_range) {
1422 if (file_in->f_op->copy_file_range !=
1423 file_out->f_op->copy_file_range)
1425 } else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) {
1429 /* Don't touch certain kinds of inodes */
1430 if (IS_IMMUTABLE(inode_out))
1433 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
1436 /* Ensure offsets don't wrap. */
1437 if (pos_in + count < pos_in || pos_out + count < pos_out)
1440 /* Shorten the copy to EOF */
1441 size_in = i_size_read(inode_in);
1442 if (pos_in >= size_in)
1445 count = min(count, size_in - (uint64_t)pos_in);
1447 ret = generic_write_check_limits(file_out, pos_out, &count);
1451 /* Don't allow overlapped copying within the same file. */
1452 if (inode_in == inode_out &&
1453 pos_out + count > pos_in &&
1454 pos_out < pos_in + count)
1462 * copy_file_range() differs from regular file read and write in that it
1463 * specifically allows returning partial success.  When it does so is up to
1464 * the ->copy_file_range() method.
1466 ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
1467 struct file *file_out, loff_t pos_out,
1468 size_t len, unsigned int flags)
1475 ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
1480 ret = rw_verify_area(READ, file_in, &pos_in, len);
1484 ret = rw_verify_area(WRITE, file_out, &pos_out, len);
1491 file_start_write(file_out);
1494 * Cloning is supported by more filesystems, so we implement copy on the
1495 * same sb using clone, but for filesystems where both clone and copy
1496 * are supported (e.g. nfs, cifs), we only call the copy method.
1498 if (file_out->f_op->copy_file_range) {
1499 ret = file_out->f_op->copy_file_range(file_in, pos_in,
1505 if (file_in->f_op->remap_file_range &&
1506 file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
1507 ret = file_in->f_op->remap_file_range(file_in, pos_in,
1509 min_t(loff_t, MAX_RW_COUNT, len),
1510 REMAP_FILE_CAN_SHORTEN);
1516 * We can get here for a same-sb copy on filesystems that do not implement
1517 * ->copy_file_range(), either because the filesystem does not support
1518 * clone or because it supports clone but rejected the clone request
1519 * (e.g. because it was not block aligned).
1521 * In both cases, fall back to a kernel copy so we are able to maintain a
1522 * consistent story about which filesystems support copy_file_range()
1523 * and which filesystems do not; that allows userspace tools to
1524 * make consistent decisions w.r.t. using copy_file_range().
1526 ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
1531 fsnotify_access(file_in);
1532 add_rchar(current, ret);
1533 fsnotify_modify(file_out);
1534 add_wchar(current, ret);
1540 file_end_write(file_out);
1544 EXPORT_SYMBOL(vfs_copy_file_range);
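/*
 * Illustrative sketch: because vfs_copy_file_range() may copy less than
 * requested, an in-kernel caller that needs the full range copied loops and
 * advances both positions itself (they are passed by value).  mymod_* is a
 * hypothetical name.
 */
static int mymod_copy_range(struct file *src, struct file *dst, size_t len)
{
	loff_t pos_in = 0, pos_out = 0;

	while (len) {
		ssize_t ret = vfs_copy_file_range(src, pos_in, dst, pos_out,
						  len, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;	/* no progress, give up */
		pos_in += ret;
		pos_out += ret;
		len -= ret;
	}
	return 0;
}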
1546 SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
1547 int, fd_out, loff_t __user *, off_out,
1548 size_t, len, unsigned int, flags)
1554 ssize_t ret = -EBADF;
1556 f_in = fdget(fd_in);
1560 f_out = fdget(fd_out);
1566 if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
1569 pos_in = f_in.file->f_pos;
1573 if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
1576 pos_out = f_out.file->f_pos;
1579 ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
1586 if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
1589 f_in.file->f_pos = pos_in;
1593 if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
1596 f_out.file->f_pos = pos_out;
1609 * Don't operate on ranges the page cache doesn't support, and don't exceed the
1610 * LFS limits. If pos is under the limit it becomes a short access. If it
1611 * exceeds the limit we return -EFBIG.
1613 int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count)
1615 struct inode *inode = file->f_mapping->host;
1616 loff_t max_size = inode->i_sb->s_maxbytes;
1617 loff_t limit = rlimit(RLIMIT_FSIZE);
1619 if (limit != RLIM_INFINITY) {
1621 send_sig(SIGXFSZ, current, 0);
1624 *count = min(*count, limit - pos);
1627 if (!(file->f_flags & O_LARGEFILE))
1628 max_size = MAX_NON_LFS;
1630 if (unlikely(pos >= max_size))
1633 *count = min(*count, max_size - pos);
1639 * Performs necessary checks before doing a write
1641 * Can adjust the writing position or the number of bytes to write.
1642 * Returns the appropriate error code that the caller should return, or
1643 * zero in case the write should be allowed.
1645 ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
1647 struct file *file = iocb->ki_filp;
1648 struct inode *inode = file->f_mapping->host;
1652 if (IS_SWAPFILE(inode))
1655 if (!iov_iter_count(from))
1658 /* FIXME: this is for backwards compatibility with 2.4 */
1659 if (iocb->ki_flags & IOCB_APPEND)
1660 iocb->ki_pos = i_size_read(inode);
1662 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
1665 count = iov_iter_count(from);
1666 ret = generic_write_check_limits(file, iocb->ki_pos, &count);
1670 iov_iter_truncate(from, count);
1671 return iov_iter_count(from);
1673 EXPORT_SYMBOL(generic_write_checks);
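/*
 * Illustrative sketch: the usual shape of a ->write_iter that runs
 * generic_write_checks() under the inode lock before writing, roughly the
 * pattern generic_file_write_iter() follows.  myfs_file_write_iter() is a
 * hypothetical example, not a drop-in implementation.
 */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);	/* <= 0: error or nothing to do */
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}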
1676 * Performs common checks before doing a file copy/clone
1677 * from @file_in to @file_out.
1679 int generic_file_rw_checks(struct file *file_in, struct file *file_out)
1681 struct inode *inode_in = file_inode(file_in);
1682 struct inode *inode_out = file_inode(file_out);
1684 /* Don't copy dirs, pipes, sockets... */
1685 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
1687 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
1690 if (!(file_in->f_mode & FMODE_READ) ||
1691 !(file_out->f_mode & FMODE_WRITE) ||
1692 (file_out->f_flags & O_APPEND))