// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}
#define iterate_all_kinds(i, n, v, I, B, K) {		\
	if (likely(n)) {				\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {				\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}					\
	}						\
}
#define iterate_and_advance(i, n, v, I, B, K) {		\
	if (unlikely(i->count < n))			\
		n = i->count;				\
	if (i->count) {					\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;	\
			skip = __bi.bi_bvec_done;	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {	\
				kvec++;			\
				skip = 0;		\
			}				\
			i->nr_segs -= kvec - i->kvec;	\
			i->kvec = kvec;			\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;			\
		} else {				\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {	\
				iov++;			\
				skip = 0;		\
			}				\
			i->nr_segs -= iov - i->iov;	\
			i->iov = iov;			\
		}					\
		i->count -= n;				\
		i->iov_offset = skip;			\
	}						\
}
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
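
/*
 * Usage sketch (illustrative, not part of this file): buffered-write
 * loops in the style of generic_perform_write() fault in the source
 * pages up front, so that a later atomic copy under page-cache locks
 * cannot deadlock on a page fault. "my_write_chunk" is hypothetical.
 *
 *	static int my_write_chunk(struct iov_iter *from, size_t chunk)
 *	{
 *		if (unlikely(iov_iter_fault_in_readable(from, chunk)))
 *			return -EFAULT;
 *		// safe to take locks and copy up to 'chunk' bytes now
 *		return 0;
 *	}
 */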
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
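
/*
 * Usage sketch (illustrative): a read(2)-style path wraps a single user
 * iovec and initialises an iterator the kernel will copy *into*
 * (direction READ means the iterator is a destination). Names are
 * hypothetical.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kernel_buf, len, &iter);
 */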
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 __wsum *csum, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, r;
	size_t off = 0;
	__wsum sum = *csum;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	*csum = sum;
	return bytes;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
				     const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				   struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}
/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
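
/*
 * Usage sketch (illustrative): a dax read path, in the spirit of the
 * pmem driver's dax_copy_to_iter() hook described above, forwards to
 * _copy_mc_to_iter() so that a poisoned source surfaces as a short copy
 * rather than a fatal machine check. "my_dax_copy_to_iter" is
 * hypothetical.
 *
 *	static size_t my_dax_copy_to_iter(void *kaddr, size_t bytes,
 *					  struct iov_iter *i)
 *	{
 *		return _copy_mc_to_iter(kaddr, bytes, i);
 *	}
 *
 * A short return then makes read(2) report the bytes copied so far, or
 * -EIO if nothing was copied.
 */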
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				   v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
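
/*
 * Usage sketch (illustrative): a persistent-memory write path calls the
 * flushcache variant unconditionally, mirroring how dax_copy_from_iter()
 * is wired up; on architectures without CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE,
 * linux/uio.h is expected to fall back to _copy_from_iter_nocache(). The
 * helper below is hypothetical.
 *
 *	static size_t my_pmem_copy_from_iter(void *pmem_addr, size_t bytes,
 *					     struct iov_iter *i)
 *	{
 *		return _copy_from_iter_flushcache(pmem_addr, bytes, i);
 *	}
 */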
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}
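
/*
 * Worked example for the fast path above: with offset == 512 and
 * n == PAGE_SIZE on an order-0 page, v == PAGE_SIZE + 512 fails the
 * "v <= PAGE_SIZE" test, so the slow path maps the request onto the
 * compound head and re-checks against page_size(head). The "n <= v"
 * half of each test catches "n + offset" wrapping past zero.
 */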
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	} else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
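
/*
 * Usage sketch (illustrative): a caller that consumes the iterator and
 * then hits a failure rewinds only what was actually consumed, e.g. in
 * a retryable send path. "my_send" is hypothetical and advances 'i' as
 * it copies.
 *
 *	size_t before = iov_iter_count(i);
 *	ssize_t sent = my_send(sock, i);
 *
 *	if (sent < 0)
 *		iov_iter_revert(i, before - iov_iter_count(i));
 */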
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i)))
		return i->count;
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
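
/*
 * Usage sketch (illustrative): wrap one page in a bio_vec and read from
 * it with copy_from_iter(); WRITE marks the iterator as a data source.
 * Names are hypothetical.
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_len = PAGE_SIZE,
 *			      .bv_offset = 0 };
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, PAGE_SIZE);
 *	copied = copy_from_iter(kernel_buf, PAGE_SIZE, &iter);
 */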
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
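
/*
 * Usage sketch (illustrative): drain 'len' bytes a caller does not care
 * about by aiming the producer at a discard iterator; copy_to_iter()
 * into it "succeeds" but the data is dropped. "produce_data" is
 * hypothetical.
 *
 *	struct iov_iter sink;
 *
 *	iov_iter_discard(&sink, READ, len);
 *	produce_data(&sink);
 */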
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
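
/*
 * Usage sketch (illustrative): a direct-I/O path pins the user pages
 * backing the next chunk of the iterator. Error handling is reduced to
 * the essentials; sizes and names are hypothetical.
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &off);
 *	if (n > 0) {
 *		// pages[] now cover 'n' bytes starting 'off' bytes into
 *		// pages[0]; drop each reference with put_page() once the
 *		// I/O completes
 *		iov_iter_advance(i, n);
 *	}
 */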
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (!next)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}
static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}
struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
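
/*
 * Usage sketch (illustrative): the usual syscall pattern following from
 * the comment above: a small on-stack array is promoted to a kmalloc'd
 * one only when needed, and kfree(iov) is always safe afterwards.
 * "do_the_io" is hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);		// NULL if the stack array was used
 *	return ret;
 */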
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
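
/*
 * Usage sketch (illustrative): single-buffer syscalls (plain read/write)
 * use this instead of import_iovec(); the caller supplies storage for
 * the one iovec.
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	if (import_single_range(WRITE, ubuf, len, &iov, &iter))
 *		return -EFAULT;
 *	// 'iter' now covers min(len, MAX_RW_COUNT) bytes of ubuf
 */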
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);
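
/*
 * Usage sketch (illustrative): feed each contiguous kernel-addressable
 * range to a callback, e.g. to hash the contents. Both functions below
 * are hypothetical.
 *
 *	static int my_hash_range(struct kvec *vec, void *ctx)
 *	{
 *		my_hash_update(ctx, vec->iov_base, vec->iov_len);
 *		return 0;
 *	}
 *
 *	err = iov_iter_for_each_range(i, bytes, my_hash_range, ctx);
 *
 * Note that the ITER_IOVEC case above yields -EINVAL: user addresses
 * cannot be handed to the callback as a kvec.
 */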