// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#define PIPE_PARANOIA /* for now */
/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) { \
	size_t __maybe_unused off = 0; \
	base = __p + i->iov_offset; \
	i->iov_offset += len; \
/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
	size_t skip = i->iov_offset; \
		len = min(n, __p->iov_len - skip); \
			base = __p->iov_base + skip; \
			if (skip < __p->iov_len) \
	i->iov_offset = skip; \
#define iterate_bvec(i, n, base, len, off, p, STEP) { \
	unsigned skip = i->iov_offset; \
		unsigned offset = p->bv_offset + skip; \
		void *kaddr = kmap_local_page(p->bv_page + \
					offset / PAGE_SIZE); \
		base = kaddr + offset % PAGE_SIZE; \
		len = min(min(n, (size_t)(p->bv_len - skip)), \
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
		kunmap_local(kaddr); \
		if (skip == p->bv_len) { \
	i->iov_offset = skip; \
#define iterate_xarray(i, n, base, len, __off, STEP) { \
	struct folio *folio; \
	loff_t start = i->xarray_start + i->iov_offset; \
	pgoff_t index = start / PAGE_SIZE; \
	XA_STATE(xas, i->xarray, index); \
	len = PAGE_SIZE - offset_in_page(start); \
	xas_for_each(&xas, folio, ULONG_MAX) { \
		if (xas_retry(&xas, folio)) \
		if (WARN_ON(xa_is_value(folio))) \
		if (WARN_ON(folio_test_hugetlb(folio))) \
		offset = offset_in_folio(folio, start + __off); \
		while (offset < folio_size(folio)) { \
			base = kmap_local_folio(folio, offset); \
			kunmap_local(base); \
			if (left || n == 0) \
	i->iov_offset += __off; \
#define __iterate_and_advance(i, n, base, len, off, I, K) { \
	if (unlikely(i->count < n)) \
		if (likely(iter_is_ubuf(i))) { \
			iterate_buf(i, n, base, len, off, \
		} else if (likely(iter_is_iovec(i))) { \
			const struct iovec *iov = i->iov; \
			iterate_iovec(i, n, base, len, off, \
			i->nr_segs -= iov - i->iov; \
		} else if (iov_iter_is_bvec(i)) { \
			const struct bio_vec *bvec = i->bvec; \
			iterate_bvec(i, n, base, len, off, \
			i->nr_segs -= bvec - i->bvec; \
		} else if (iov_iter_is_kvec(i)) { \
			const struct kvec *kvec = i->kvec; \
			iterate_iovec(i, n, base, len, off, \
			i->nr_segs -= kvec - i->kvec; \
		} else if (iov_iter_is_xarray(i)) { \
			iterate_xarray(i, n, base, len, off, \
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
	if (should_fail_usercopy())
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
static int copyin(void *to, const void __user *from, size_t n)
	if (should_fail_usercopy())
	if (access_ok(from, n)) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
static bool sanity(const struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	if (i->last_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...
		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != abs(i->last_offset)))
			goto Bad;	// ... at the end of segment
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
#define sanity(i) true
static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
	struct page *page = alloc_page(GFP_USER);
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
static inline int last_offset(const struct pipe_buffer *buf)
	if (buf->ops == &default_pipe_buf_ops)
		return buf->len;	// buf->offset is 0 for those
		return -(buf->offset + buf->len);
static struct page *append_pipe(struct iov_iter *i, size_t size,
	struct pipe_inode_info *pipe = i->pipe;
	int offset = i->last_offset;
	struct pipe_buffer *buf;
	if (offset > 0 && offset < PAGE_SIZE) {
		// some space in the last buffer; add to it
		buf = pipe_buf(pipe, pipe->head - 1);
		size = min_t(size_t, size, PAGE_SIZE - offset);
		i->last_offset += size;
	// OK, we need a new buffer
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
	page = push_anon(pipe, size);
	i->head = pipe->head - 1;
	i->last_offset = size;
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	if (offset && i->last_offset == -offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			i->last_offset -= bytes;
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
	push_page(pipe, page, offset, bytes);
	i->last_offset = -(offset + bytes);
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @size: maximum length
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size. For each iovec, fault in each page that constitutes the iovec.
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 * Always returns 0 for non-userspace iterators.
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			ret = fault_in_readable(p->iov_base + skip, len);
EXPORT_SYMBOL(fault_in_iov_iter_readable);
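
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * the usual way callers pair fault_in_iov_iter_readable() with an atomic
 * copy, as in generic_perform_write().  The helper name and the page/offset
 * arguments are hypothetical.
 */
static __maybe_unused size_t example_copy_from_user_iter(struct page *page,
		unsigned int offset, size_t bytes, struct iov_iter *from)
{
	size_t copied = 0;

	do {
		/* non-zero result == some bytes could not be faulted in */
		if (fault_in_iov_iter_readable(from, bytes) == bytes)
			break;		/* nothing usable; give up */
		copied = copy_page_from_iter_atomic(page, offset, bytes, from);
	} while (!copied && iov_iter_count(from));

	return copied;
}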
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @size: maximum length
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults. This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 * Always returns 0 for non-userspace iterators.
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
void iov_iter_init(struct iov_iter *i, unsigned int direction,
		   const struct iovec *iov, unsigned long nr_segs,
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.data_source = direction,
EXPORT_SYMBOL(iov_iter_init);
// returns the offset in the partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
	struct pipe_inode_info *pipe = i->pipe;
	int used = pipe->head - pipe->tail;
	int off = i->last_offset;
	*npages = max((int)pipe->max_usage - used, 0);
	if (off > 0 && off < PAGE_SIZE) { // anon and not full
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
	unsigned int off, chunk;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(page, off, addr, chunk);
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
	unsigned int chunk, r;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
		struct page *page = append_pipe(i, bytes, &r);
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
	if (WARN_ON_ONCE(i->data_source))
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
	unsigned int off, chunk;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
		struct page *page = append_pipe(i, bytes, &off);
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
			iov_iter_revert(i, rem);
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 * The main differences between this and typical _copy_to_iter() are:
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 * Return: number of bytes copied (may be %0)
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
	if (WARN_ON_ONCE(i->data_source))
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
	if (WARN_ON_ONCE(!i->data_source))
	if (user_backed_iter(i))
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
EXPORT_SYMBOL(_copy_from_iter);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
	if (WARN_ON_ONCE(!i->data_source))
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees that all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 * Return: number of bytes copied (may be %0)
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
	if (WARN_ON_ONCE(!i->data_source))
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
	size_t v = n + offset;
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * inside of a single (order-0) page.
	if (n <= v && v <= PAGE_SIZE)
	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;
	if (WARN_ON(n > v || v > page_size(head)))
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
	if (!page_copy_sane(page, offset, bytes))
	if (WARN_ON_ONCE(i->data_source))
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	page += offset / PAGE_SIZE; // first subpage
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		if (offset == PAGE_SIZE) {
EXPORT_SYMBOL(copy_page_to_iter);
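
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * a read-side caller pushing a run of pages into a destination iterator
 * with copy_page_to_iter(), stopping on a short copy.  The helper name and
 * parameters are hypothetical.
 */
static __maybe_unused size_t example_copy_pages_to_iter(struct page **pages,
		size_t nr, size_t skip, struct iov_iter *to)
{
	size_t copied = 0;

	for (size_t k = 0; k < nr && iov_iter_count(to); k++, skip = 0) {
		size_t want = PAGE_SIZE - skip;
		size_t n = copy_page_to_iter(pages[k], skip, want, to);

		copied += n;
		if (n < want)		/* destination faulted or ran out */
			break;
	}
	return copied;
}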
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
	if (!page_copy_sane(page, offset, bytes))
	page += offset / PAGE_SIZE; // first subpage
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		if (offset == PAGE_SIZE) {
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
	unsigned int chunk, off;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
EXPORT_SYMBOL(iov_iter_zero);
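
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * filling a hole in a read path with iov_iter_zero(), bounded by what is
 * left in the destination iterator.  The helper name is hypothetical.
 */
static __maybe_unused size_t example_read_hole(struct iov_iter *to, size_t hole)
{
	/* returns the number of bytes actually zeroed in the destination */
	return iov_iter_zero(min(hole, iov_iter_count(to)), to);
}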
size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (!page_copy_sane(page, offset, bytes)) {
		kunmap_atomic(kaddr);
	if (WARN_ON_ONCE(!i->data_source)) {
		kunmap_atomic(kaddr);
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	kunmap_atomic(kaddr);
EXPORT_SYMBOL(copy_page_from_iter_atomic);
static void pipe_advance(struct iov_iter *i, size_t size)
	struct pipe_inode_info *pipe = i->pipe;
	int off = i->last_offset;
		pipe_discard_from(pipe, i->start_head);	// discard everything
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += abs(off) - buf->offset;
		if (size <= buf->len) {
			i->last_offset = last_offset(buf);
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
	const struct bio_vec *bvec, *end;
	size += i->iov_offset;
	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
		size -= bvec->bv_len;
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
	const struct iovec *iov, *end;
	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
		size -= iov->iov_len;
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
void iov_iter_advance(struct iov_iter *i, size_t size)
	if (unlikely(i->count < size))
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
EXPORT_SYMBOL(iov_iter_advance);
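
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * skipping a fixed-size header in a source iterator before handing the
 * remainder to the payload parser.  The helper name is hypothetical.
 */
static __maybe_unused bool example_skip_header(struct iov_iter *from,
		size_t hdr_len)
{
	if (iov_iter_count(from) < hdr_len)
		return false;
	iov_iter_advance(from, hdr_len);	/* count and segments move on */
	return true;
}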
void iov_iter_revert(struct iov_iter *i, size_t unroll)
	if (WARN_ON(unroll > MAX_RW_COUNT))
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int head = pipe->head;
		while (head > i->start_head) {
			struct pipe_buffer *b = pipe_buf(pipe, --head);
			if (unroll < b->len) {
				i->last_offset = last_offset(b);
			pipe_buf_release(pipe, b);
	if (unlikely(iov_iter_is_discard(i)))
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
			size_t n = (--bvec)->bv_len;
				i->iov_offset = n - unroll;
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
			size_t n = (--iov)->iov_len;
				i->iov_offset = n - unroll;
EXPORT_SYMBOL(iov_iter_revert);
 * Return the count of just the current iov_iter segment.
size_t iov_iter_single_seg_count(const struct iov_iter *i)
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
EXPORT_SYMBOL(iov_iter_kvec);
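
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * gathering two kernel buffers into one contiguous destination by wrapping
 * them in a kvec iterator.  WRITE marks the iterator as the data source,
 * which is what copy_from_iter() expects.  The helper name is hypothetical.
 */
static __maybe_unused size_t example_gather_kvec(void *dst, void *a, size_t alen,
		void *b, size_t blen)
{
	struct kvec vec[2] = {
		{ .iov_base = a, .iov_len = alen },
		{ .iov_base = b, .iov_len = blen },
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, WRITE, vec, 2, alen + blen);
	return copy_from_iter(dst, alen + blen, &iter);
}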
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
EXPORT_SYMBOL(iov_iter_bvec);
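
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * copying kernel data into part of a page through a single-element bvec
 * iterator.  READ marks the iterator as the destination, which is what
 * copy_to_iter() expects.  The helper name is hypothetical and assumes
 * offset + len fits within the page.
 */
static __maybe_unused size_t example_fill_page_bvec(struct page *page,
		unsigned int offset, const void *src, size_t len)
{
	struct bio_vec bv = {
		.bv_page	= page,
		.bv_len		= len,
		.bv_offset	= offset,
	};
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ, &bv, 1, len);
	return copy_to_iter(src, len, &iter);
}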
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.start_head = pipe->head,
EXPORT_SYMBOL(iov_iter_pipe);
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages. The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray_start = start,
EXPORT_SYMBOL(iov_iter_xarray);
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
EXPORT_SYMBOL(iov_iter_discard);
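
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * consuming bytes a producer has already generated without storing them,
 * by "copying" them into a discard iterator (the copy just advances the
 * count).  The helper name is hypothetical.
 */
static __maybe_unused size_t example_throw_away(const void *buf, size_t len)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, READ, len);
	return copy_to_iter(buf, len, &iter);
}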
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
	size_t size = i->count;
	size_t skip = i->iov_offset;
	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 * Return: false if any addresses or lengths intersect with the provided masks
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);
	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);
	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;
		if (size & len_mask)
		if (size && i->last_offset > 0) {
			if (i->last_offset & addr_mask)
	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
		if ((i->xarray_start + i->iov_offset) & addr_mask)
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		res |= (unsigned long)i->iov[k].iov_base + skip;
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
unsigned long iov_iter_alignment(const struct iov_iter *i)
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		return ((unsigned long)i->ubuf + i->iov_offset) | size;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);
	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);
	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;
		if (size && i->last_offset > 0)
			return size | i->last_offset;
	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;
EXPORT_SYMBOL(iov_iter_alignment);
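
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * a typical direct-I/O style check that every segment address and length
 * is a multiple of the block size, using the OR-of-everything value that
 * iov_iter_alignment() returns.  Assumes blksize is a power of two; the
 * helper name is hypothetical.
 */
static __maybe_unused bool example_dio_aligned(const struct iov_iter *iter,
		unsigned int blksize)
{
	return !(iov_iter_alignment(iter) & (blksize - 1));
}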
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	if (iter_is_ubuf(i))
	if (WARN_ON(!iter_is_iovec(i)))
	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
			size -= i->iov[k].iov_len;
EXPORT_SYMBOL(iov_iter_gap_alignment);
static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
	if (count > maxpages)
	WARN_ON(!count);	// caller should've prevented that
	*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page ***pages, size_t maxsize, unsigned maxpages,
	unsigned int npages, count, off, chunk;
	*start = off = pipe_npages(i, &npages);
	count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
	for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
		struct page *page = append_pipe(i, left, &off);
		chunk = min_t(size_t, left, PAGE_SIZE - off);
		get_page(*p++ = page);
	return maxsize - left;
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
	XA_STATE(xas, xa, index);
	unsigned int ret = 0;
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
	unsigned nr, offset, count;
	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;
	count = want_pages_array(pages, maxsize, offset, maxpages);
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;
	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		return (unsigned long)i->iov[k].iov_base + skip;
	BUG(); // if it had been empty, we wouldn't get called
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
	size_t skip = i->iov_offset, len;
	len = i->bvec->bv_len - skip;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start,
		   unsigned int gup_flags)
	if (maxsize > i->count)
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;
	if (likely(user_backed_iter(i))) {
		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
			gup_flags |= FOLL_NOFAULT;
		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
	if (iov_iter_is_bvec(i)) {
		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start, unsigned gup_flags)
	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
EXPORT_SYMBOL_GPL(iov_iter_get_pages);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
		   size_t maxsize, unsigned maxpages, size_t *start)
	return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
EXPORT_SYMBOL(iov_iter_get_pages2);
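
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * pinning the leading pages of an iterator for zero-copy I/O and releasing
 * them afterwards.  iov_iter_get_pages2() advances the iterator and returns
 * the number of bytes covered, starting at *offset within pages[0].  The
 * helper name is hypothetical.
 */
static __maybe_unused ssize_t example_pin_first_pages(struct iov_iter *iter,
		struct page **pages, unsigned int maxpages)
{
	size_t offset;
	ssize_t bytes;

	bytes = iov_iter_get_pages2(iter, pages, maxpages * PAGE_SIZE,
				    maxpages, &offset);
	if (bytes <= 0)
		return bytes;
	/* ... perform I/O against pages[], starting at offset ... */
	for (int k = 0; k < DIV_ROUND_UP(offset + bytes, PAGE_SIZE); k++)
		put_page(pages[k]);
	return bytes;
}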
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start, unsigned gup_flags)
	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start)
	return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
	if (WARN_ON_ONCE(!i->data_source))
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
EXPORT_SYMBOL(csum_and_copy_from_iter);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
	struct csum_state *csstate = _csstate;
	if (WARN_ON_ONCE(i->data_source))
	if (unlikely(iov_iter_is_discard(i))) {
		// can't use csum_and_memcpy() for that one - data is not copied
		csstate->csum = csum_block_add(csstate->csum,
					       csum_partial(addr, bytes, 0),
		csstate->off += bytes;
	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
EXPORT_SYMBOL(hash_and_copy_to_iter);
static int iov_npages(const struct iov_iter *i, int maxpages)
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
static int bvec_npages(const struct iov_iter *i, int maxpages)
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
int iov_iter_npages(const struct iov_iter *i, int maxpages)
	if (unlikely(!i->count))
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		pipe_npages(i, &npages);
		return min(npages, maxpages);
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
EXPORT_SYMBOL(iov_iter_npages);
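
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * sizing a page-pointer array from iov_iter_npages() before mapping or
 * pinning an iterator, capped at the caller's limit.  The helper name is
 * hypothetical.
 */
static __maybe_unused struct page **example_alloc_page_array(
		const struct iov_iter *iter, int cap, int *nr)
{
	*nr = iov_iter_npages(iter, cap);
	if (!*nr)
		return NULL;
	return kcalloc(*nr, sizeof(struct page *), GFP_KERNEL);
}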
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
	if (unlikely(iov_iter_is_pipe(new))) {
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
EXPORT_SYMBOL(dup_iter);
static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;
	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
	for (i = 0; i < nr_segs; i++) {
		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
		/* check for compat_size_t not fitting in compat_ssize_t .. */
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
	struct iovec *iov = fast_iov;
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
			return ERR_PTR(-ENOMEM);
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (iov != fast_iov)
		return ERR_PTR(ret);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
	ssize_t total_len = 0;
	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
		return PTR_ERR(iov);
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;
		if (!access_ok(iov[seg].iov_base, len)) {
		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
	iov_iter_init(i, type, iov, nr_segs, total_len);
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @fast_iov.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 * Return: Negative error code on error, bytes imported on success
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
EXPORT_SYMBOL(import_iovec);
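
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * a readv()-style caller importing a user iovec array with a small on-stack
 * fast path, then freeing whatever import_iovec() handed back (NULL when the
 * on-stack array was used).  The helper name is hypothetical.
 */
static __maybe_unused ssize_t example_import_readv(const struct iovec __user *uvec,
		unsigned long nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, iter);
	if (ret < 0)
		return ret;
	/* ... use *iter here; the total byte count is in ret ... */
	kfree(iov);
	return ret;
}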
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
	if (len > MAX_RW_COUNT)
	if (unlikely(!access_ok(buf, len)))
	iov->iov_base = buf;
	iov_iter_init(i, rw, iov, 1, len);
EXPORT_SYMBOL(import_single_range);
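
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * wrapping a single user buffer with import_single_range() and then pulling
 * it into a kernel buffer.  WRITE marks the user buffer as the data source,
 * which is what copy_from_iter() expects.  The helper name is hypothetical.
 */
static __maybe_unused ssize_t example_copy_in_user_buf(void __user *ubuf,
		void *dst, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
	if (ret)
		return ret;
	return copy_from_iter(dst, len, &iter);
}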
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * failed.
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
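
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * the save/restore pattern around a copy that may be retried, using
 * iov_iter_save_state() from <linux/uio.h> together with iov_iter_restore().
 * The helper name is hypothetical.
 */
static __maybe_unused size_t example_copy_or_rewind(void *dst, size_t len,
		struct iov_iter *from)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(from, &state);
	copied = copy_from_iter(dst, len, from);
	if (copied != len)			/* e.g. a partial fault */
		iov_iter_restore(from, &state);	/* rewind so the caller can retry */
	return copied;
}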