1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 /* covers iovec and kvec alike */
20 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
22 size_t skip = i->iov_offset; \
24 len = min(n, __p->iov_len - skip); \
26 base = __p->iov_base + skip; \
31 if (skip < __p->iov_len) \
37 i->iov_offset = skip; \
41 #define iterate_bvec(i, n, base, len, off, p, STEP) { \
43 unsigned skip = i->iov_offset; \
45 unsigned offset = p->bv_offset + skip; \
47 void *kaddr = kmap_local_page(p->bv_page + \
48 offset / PAGE_SIZE); \
49 base = kaddr + offset % PAGE_SIZE; \
50 len = min(min(n, (size_t)(p->bv_len - skip)), \
51 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
53 kunmap_local(kaddr); \
57 if (skip == p->bv_len) { \
65 i->iov_offset = skip; \
69 #define iterate_xarray(i, n, base, len, __off, STEP) { \
72 struct folio *folio; \
73 loff_t start = i->xarray_start + i->iov_offset; \
74 pgoff_t index = start / PAGE_SIZE; \
75 XA_STATE(xas, i->xarray, index); \
77 len = PAGE_SIZE - offset_in_page(start); \
79 xas_for_each(&xas, folio, ULONG_MAX) { \
82 if (xas_retry(&xas, folio)) \
84 if (WARN_ON(xa_is_value(folio))) \
86 if (WARN_ON(folio_test_hugetlb(folio))) \
88 offset = offset_in_folio(folio, start + __off); \
89 while (offset < folio_size(folio)) { \
90 base = kmap_local_folio(folio, offset); \
105 i->iov_offset += __off; \
109 #define __iterate_and_advance(i, n, base, len, off, I, K) { \
110 if (unlikely(i->count < n)) \
113 if (likely(iter_is_iovec(i))) { \
114 const struct iovec *iov = i->iov; \
117 iterate_iovec(i, n, base, len, off, \
119 i->nr_segs -= iov - i->iov; \
121 } else if (iov_iter_is_bvec(i)) { \
122 const struct bio_vec *bvec = i->bvec; \
125 iterate_bvec(i, n, base, len, off, \
127 i->nr_segs -= bvec - i->bvec; \
129 } else if (iov_iter_is_kvec(i)) { \
130 const struct kvec *kvec = i->kvec; \
133 iterate_iovec(i, n, base, len, off, \
135 i->nr_segs -= kvec - i->kvec; \
137 } else if (iov_iter_is_xarray(i)) { \
140 iterate_xarray(i, n, base, len, off, \
146 #define iterate_and_advance(i, n, base, len, off, I, K) \
147 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
149 static int copyout(void __user *to, const void *from, size_t n)
151 if (should_fail_usercopy())
153 if (access_ok(to, n)) {
154 instrument_copy_to_user(to, from, n);
155 n = raw_copy_to_user(to, from, n);
160 static int copyin(void *to, const void __user *from, size_t n)
162 if (should_fail_usercopy())
164 if (access_ok(from, n)) {
165 instrument_copy_from_user(to, from, n);
166 n = raw_copy_from_user(to, from, n);
172 static bool sanity(const struct iov_iter *i)
174 struct pipe_inode_info *pipe = i->pipe;
175 unsigned int p_head = pipe->head;
176 unsigned int p_tail = pipe->tail;
177 unsigned int p_mask = pipe->ring_size - 1;
178 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
179 unsigned int i_head = i->head;
183 struct pipe_buffer *p;
184 if (unlikely(p_occupancy == 0))
185 goto Bad; // pipe must be non-empty
186 if (unlikely(i_head != p_head - 1))
187 goto Bad; // must be at the last buffer...
189 p = &pipe->bufs[i_head & p_mask];
190 if (unlikely(p->offset + p->len != i->iov_offset))
191 goto Bad; // ... at the end of segment
193 if (i_head != p_head)
194 goto Bad; // must be right after the last buffer
198 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
199 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
200 p_head, p_tail, pipe->ring_size);
201 for (idx = 0; idx < pipe->ring_size; idx++)
202 printk(KERN_ERR "[%p %p %d %d]\n",
204 pipe->bufs[idx].page,
205 pipe->bufs[idx].offset,
206 pipe->bufs[idx].len);
211 #define sanity(i) true
214 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
217 struct pipe_inode_info *pipe = i->pipe;
218 struct pipe_buffer *buf;
219 unsigned int p_tail = pipe->tail;
220 unsigned int p_mask = pipe->ring_size - 1;
221 unsigned int i_head = i->head;
224 if (unlikely(bytes > i->count))
227 if (unlikely(!bytes))
234 buf = &pipe->bufs[i_head & p_mask];
236 if (offset == off && buf->page == page) {
237 /* merge with the last one */
239 i->iov_offset += bytes;
243 buf = &pipe->bufs[i_head & p_mask];
245 if (pipe_full(i_head, p_tail, pipe->max_usage))
248 buf->ops = &page_cache_pipe_buf_ops;
252 buf->offset = offset;
255 pipe->head = i_head + 1;
256 i->iov_offset = offset + bytes;
264 * fault_in_iov_iter_readable - fault in iov iterator for reading
266 * @size: maximum length
268 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
269 * @size. For each iovec, fault in each page that constitutes the iovec.
271 * Returns the number of bytes not faulted in (like copy_to_user() and
274 * Always returns 0 for non-userspace iterators.
276 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
278 if (iter_is_iovec(i)) {
279 size_t count = min(size, iov_iter_count(i));
280 const struct iovec *p;
284 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
285 size_t len = min(count, p->iov_len - skip);
290 ret = fault_in_readable(p->iov_base + skip, len);
299 EXPORT_SYMBOL(fault_in_iov_iter_readable);
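/*
 * Illustrative sketch (not part of this file): the retry loop that callers
 * such as buffered-write paths typically build around
 * fault_in_iov_iter_readable() - fault the user pages in up front, copy with
 * page faults disabled, and go around again if the copy still comes up short.
 * The helper name is hypothetical; copy_page_from_iter_atomic() is the real
 * primitive defined later in this file.
 */
static ssize_t example_fill_page(struct page *page, struct iov_iter *from)
{
	size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
	size_t copied;

	if (!bytes)
		return 0;
	do {
		/* returns the number of bytes *not* faulted in */
		if (fault_in_iov_iter_readable(from, bytes) == bytes)
			return -EFAULT;
		/* page faults are disabled inside; may legitimately copy less */
		copied = copy_page_from_iter_atomic(page, 0, bytes, from);
	} while (!copied);

	return copied;
}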
302 * fault_in_iov_iter_writeable - fault in iov iterator for writing
304 * @size: maximum length
306 * Faults in the iterator using get_user_pages(), i.e., without triggering
307 * hardware page faults. This is primarily useful when we already know that
308 * some or all of the pages in @i aren't in memory.
310 * Returns the number of bytes not faulted in, like copy_to_user() and
313 * Always returns 0 for non-user-space iterators.
315 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
317 if (iter_is_iovec(i)) {
318 size_t count = min(size, iov_iter_count(i));
319 const struct iovec *p;
323 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
324 size_t len = min(count, p->iov_len - skip);
329 ret = fault_in_safe_writeable(p->iov_base + skip, len);
338 EXPORT_SYMBOL(fault_in_iov_iter_writeable);
340 void iov_iter_init(struct iov_iter *i, unsigned int direction,
341 const struct iovec *iov, unsigned long nr_segs,
344 WARN_ON(direction & ~(READ | WRITE));
345 *i = (struct iov_iter) {
346 .iter_type = ITER_IOVEC,
348 .data_source = direction,
355 EXPORT_SYMBOL(iov_iter_init);
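/*
 * Illustrative sketch (not part of this file): hand-rolling an ITER_IOVEC
 * with iov_iter_init() over a caller-supplied user iovec array and draining
 * it into a kernel buffer.  The helper name is hypothetical; most callers
 * would reach for import_iovec() (below) instead, which also validates the
 * user array.
 */
static size_t example_gather_from_user(void *kbuf, size_t kbuf_len,
				       const struct iovec *vec,
				       unsigned long nr_segs, size_t total)
{
	struct iov_iter iter;

	/* WRITE: the user segments are the data source for this transfer */
	iov_iter_init(&iter, WRITE, vec, nr_segs, total);
	return copy_from_iter(kbuf, min(kbuf_len, total), &iter);
}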
357 static inline bool allocated(struct pipe_buffer *buf)
359 return buf->ops == &default_pipe_buf_ops;
362 static inline void data_start(const struct iov_iter *i,
363 unsigned int *iter_headp, size_t *offp)
365 unsigned int p_mask = i->pipe->ring_size - 1;
366 unsigned int iter_head = i->head;
367 size_t off = i->iov_offset;
369 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
374 *iter_headp = iter_head;
378 static size_t push_pipe(struct iov_iter *i, size_t size,
379 int *iter_headp, size_t *offp)
381 struct pipe_inode_info *pipe = i->pipe;
382 unsigned int p_tail = pipe->tail;
383 unsigned int p_mask = pipe->ring_size - 1;
384 unsigned int iter_head;
388 if (unlikely(size > i->count))
394 data_start(i, &iter_head, &off);
395 *iter_headp = iter_head;
398 left -= PAGE_SIZE - off;
400 pipe->bufs[iter_head & p_mask].len += size;
403 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
406 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
407 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
408 struct page *page = alloc_page(GFP_USER);
412 buf->ops = &default_pipe_buf_ops;
416 buf->len = min_t(ssize_t, left, PAGE_SIZE);
419 pipe->head = iter_head;
427 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
430 struct pipe_inode_info *pipe = i->pipe;
431 unsigned int p_mask = pipe->ring_size - 1;
438 bytes = n = push_pipe(i, bytes, &i_head, &off);
442 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
443 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
445 i->iov_offset = off + chunk;
455 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
456 __wsum sum, size_t off)
458 __wsum next = csum_partial_copy_nocheck(from, to, len);
459 return csum_block_add(sum, next, off);
462 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
463 struct iov_iter *i, __wsum *sump)
465 struct pipe_inode_info *pipe = i->pipe;
466 unsigned int p_mask = pipe->ring_size - 1;
475 bytes = push_pipe(i, bytes, &i_head, &r);
477 size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
478 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
479 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
482 i->iov_offset = r + chunk;
493 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
495 if (unlikely(iov_iter_is_pipe(i)))
496 return copy_pipe_to_iter(addr, bytes, i);
497 if (iter_is_iovec(i))
499 iterate_and_advance(i, bytes, base, len, off,
500 copyout(base, addr + off, len),
501 memcpy(base, addr + off, len)
506 EXPORT_SYMBOL(_copy_to_iter);
508 #ifdef CONFIG_ARCH_HAS_COPY_MC
509 static int copyout_mc(void __user *to, const void *from, size_t n)
511 if (access_ok(to, n)) {
512 instrument_copy_to_user(to, from, n);
513 n = copy_mc_to_user((__force void *) to, from, n);
518 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
521 struct pipe_inode_info *pipe = i->pipe;
522 unsigned int p_mask = pipe->ring_size - 1;
524 unsigned int valid = pipe->head;
525 size_t n, off, xfer = 0;
530 n = push_pipe(i, bytes, &i_head, &off);
532 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
533 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
535 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
540 i->iov_offset = off + chunk;
545 pipe->bufs[i_head & p_mask].len -= rem;
546 pipe_discard_from(pipe, valid);
558 * _copy_mc_to_iter - copy to iter with source memory error exception handling
559 * @addr: source kernel address
560 * @bytes: total transfer length
561 * @i: destination iterator
563 * The pmem driver deploys this for the dax operation
564 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
565 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
566 * successfully copied.
568 * The main differences between this and typical _copy_to_iter() are:
570 * * Typical tail/residue handling after a fault retries the copy
571 * byte-by-byte until the fault happens again. Re-triggering machine
572 * checks is potentially fatal so the implementation uses source
573 * alignment and poison alignment assumptions to avoid re-triggering
574 * hardware exceptions.
576 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
577 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
580 * Return: number of bytes copied (may be %0)
582 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
584 if (unlikely(iov_iter_is_pipe(i)))
585 return copy_mc_pipe_to_iter(addr, bytes, i);
586 if (iter_is_iovec(i))
588 __iterate_and_advance(i, bytes, base, len, off,
589 copyout_mc(base, addr + off, len),
590 copy_mc_to_kernel(base, addr + off, len)
595 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
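/*
 * Illustrative sketch (not part of this file): how a pmem/dax style read
 * might consume _copy_mc_to_iter(), per the comment above - a short copy
 * signals a machine-check-poisoned source, and the caller turns a copy that
 * transferred nothing into -EIO.  example_dax_read() is a hypothetical name.
 */
static ssize_t example_dax_read(const void *kaddr, size_t len, struct iov_iter *i)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, i);

	if (!copied && len)
		return -EIO;	/* poisoned right at the start, nothing copied */
	return copied;		/* possibly short: the remainder was poisoned or faulted */
}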
596 #endif /* CONFIG_ARCH_HAS_COPY_MC */
598 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
600 if (unlikely(iov_iter_is_pipe(i))) {
604 if (iter_is_iovec(i))
606 iterate_and_advance(i, bytes, base, len, off,
607 copyin(addr + off, base, len),
608 memcpy(addr + off, base, len)
613 EXPORT_SYMBOL(_copy_from_iter);
615 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
617 if (unlikely(iov_iter_is_pipe(i))) {
621 iterate_and_advance(i, bytes, base, len, off,
622 __copy_from_user_inatomic_nocache(addr + off, base, len),
623 memcpy(addr + off, base, len)
628 EXPORT_SYMBOL(_copy_from_iter_nocache);
630 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
632 * _copy_from_iter_flushcache - write destination through cpu cache
633 * @addr: destination kernel address
634 * @bytes: total transfer length
635 * @i: source iterator
637 * The pmem driver arranges for filesystem-dax to use this facility via
638 * dax_copy_from_iter() for ensuring that writes to persistent memory
639 * are flushed through the CPU cache. It is differentiated from
640 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
641 * all iterator types. The _copy_from_iter_nocache() only attempts to
642 * bypass the cache for the ITER_IOVEC case, and on some archs may use
643 * instructions that strand dirty-data in the cache.
645 * Return: number of bytes copied (may be %0)
647 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
649 if (unlikely(iov_iter_is_pipe(i))) {
653 iterate_and_advance(i, bytes, base, len, off,
654 __copy_from_user_flushcache(addr + off, base, len),
655 memcpy_flushcache(addr + off, base, len)
660 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
663 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
666 size_t v = n + offset;
669 * The general case needs to access the page order in order
670 * to compute the page size.
671 * However, we mostly deal with order-0 pages and thus can
672 * avoid a possible cache line miss for requests that fit all
675 if (n <= v && v <= PAGE_SIZE)
678 head = compound_head(page);
679 v += (page - head) << PAGE_SHIFT;
681 if (likely(n <= v && v <= (page_size(head))))
687 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
690 if (unlikely(iov_iter_is_pipe(i))) {
691 return copy_page_to_iter_pipe(page, offset, bytes, i);
693 void *kaddr = kmap_local_page(page);
694 size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
700 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
704 if (unlikely(!page_copy_sane(page, offset, bytes)))
706 page += offset / PAGE_SIZE; // first subpage
709 size_t n = __copy_page_to_iter(page, offset,
710 min(bytes, (size_t)PAGE_SIZE - offset), i);
716 if (offset == PAGE_SIZE) {
723 EXPORT_SYMBOL(copy_page_to_iter);
725 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
728 if (page_copy_sane(page, offset, bytes)) {
729 void *kaddr = kmap_local_page(page);
730 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
736 EXPORT_SYMBOL(copy_page_from_iter);
738 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
740 struct pipe_inode_info *pipe = i->pipe;
741 unsigned int p_mask = pipe->ring_size - 1;
748 bytes = n = push_pipe(i, bytes, &i_head, &off);
753 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
754 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
755 memset(p + off, 0, chunk);
758 i->iov_offset = off + chunk;
767 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
769 if (unlikely(iov_iter_is_pipe(i)))
770 return pipe_zero(bytes, i);
771 iterate_and_advance(i, bytes, base, len, count,
772 clear_user(base, len),
778 EXPORT_SYMBOL(iov_iter_zero);
780 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
783 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
784 if (unlikely(!page_copy_sane(page, offset, bytes))) {
785 kunmap_atomic(kaddr);
788 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
789 kunmap_atomic(kaddr);
793 iterate_and_advance(i, bytes, base, len, off,
794 copyin(p + off, base, len),
795 memcpy(p + off, base, len)
797 kunmap_atomic(kaddr);
800 EXPORT_SYMBOL(copy_page_from_iter_atomic);
802 static inline void pipe_truncate(struct iov_iter *i)
804 struct pipe_inode_info *pipe = i->pipe;
805 unsigned int p_tail = pipe->tail;
806 unsigned int p_head = pipe->head;
807 unsigned int p_mask = pipe->ring_size - 1;
809 if (!pipe_empty(p_head, p_tail)) {
810 struct pipe_buffer *buf;
811 unsigned int i_head = i->head;
812 size_t off = i->iov_offset;
815 buf = &pipe->bufs[i_head & p_mask];
816 buf->len = off - buf->offset;
819 while (p_head != i_head) {
821 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
828 static void pipe_advance(struct iov_iter *i, size_t size)
830 struct pipe_inode_info *pipe = i->pipe;
832 struct pipe_buffer *buf;
833 unsigned int p_mask = pipe->ring_size - 1;
834 unsigned int i_head = i->head;
835 size_t off = i->iov_offset, left = size;
837 if (off) /* make it relative to the beginning of buffer */
838 left += off - pipe->bufs[i_head & p_mask].offset;
840 buf = &pipe->bufs[i_head & p_mask];
841 if (left <= buf->len)
847 i->iov_offset = buf->offset + left;
850 /* ... and discard everything past that point */
854 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
856 const struct bio_vec *bvec, *end;
862 size += i->iov_offset;
864 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
865 if (likely(size < bvec->bv_len))
867 size -= bvec->bv_len;
869 i->iov_offset = size;
870 i->nr_segs -= bvec - i->bvec;
874 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
876 const struct iovec *iov, *end;
882 size += i->iov_offset; // from beginning of current segment
883 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
884 if (likely(size < iov->iov_len))
886 size -= iov->iov_len;
888 i->iov_offset = size;
889 i->nr_segs -= iov - i->iov;
893 void iov_iter_advance(struct iov_iter *i, size_t size)
895 if (unlikely(i->count < size))
897 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
898 /* iovec and kvec have identical layouts */
899 iov_iter_iovec_advance(i, size);
900 } else if (iov_iter_is_bvec(i)) {
901 iov_iter_bvec_advance(i, size);
902 } else if (iov_iter_is_pipe(i)) {
903 pipe_advance(i, size);
904 } else if (unlikely(iov_iter_is_xarray(i))) {
905 i->iov_offset += size;
907 } else if (iov_iter_is_discard(i)) {
911 EXPORT_SYMBOL(iov_iter_advance);
913 void iov_iter_revert(struct iov_iter *i, size_t unroll)
917 if (WARN_ON(unroll > MAX_RW_COUNT))
920 if (unlikely(iov_iter_is_pipe(i))) {
921 struct pipe_inode_info *pipe = i->pipe;
922 unsigned int p_mask = pipe->ring_size - 1;
923 unsigned int i_head = i->head;
924 size_t off = i->iov_offset;
926 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
927 size_t n = off - b->offset;
933 if (!unroll && i_head == i->start_head) {
938 b = &pipe->bufs[i_head & p_mask];
939 off = b->offset + b->len;
946 if (unlikely(iov_iter_is_discard(i)))
948 if (unroll <= i->iov_offset) {
949 i->iov_offset -= unroll;
952 unroll -= i->iov_offset;
953 if (iov_iter_is_xarray(i)) {
954 BUG(); /* We should never go beyond the start of the specified
955 * range since we might then be straying into pages that
958 } else if (iov_iter_is_bvec(i)) {
959 const struct bio_vec *bvec = i->bvec;
961 size_t n = (--bvec)->bv_len;
965 i->iov_offset = n - unroll;
970 } else { /* same logics for iovec and kvec */
971 const struct iovec *iov = i->iov;
973 size_t n = (--iov)->iov_len;
977 i->iov_offset = n - unroll;
984 EXPORT_SYMBOL(iov_iter_revert);
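/*
 * Illustrative sketch (not part of this file): a common revert pattern.
 * Data is copied into the iterator optimistically; if a later check fails,
 * iov_iter_revert() winds the iterator back so the caller can retry or fail
 * cleanly.  example_copy_checked() and its verify callback are assumptions
 * for illustration only.
 */
static ssize_t example_copy_checked(const void *src, size_t len, struct iov_iter *to,
				    bool (*verify)(const void *src, size_t len))
{
	size_t copied = copy_to_iter(src, len, to);

	if (!verify(src, copied)) {
		/* give back exactly what we consumed from the iterator */
		iov_iter_revert(to, copied);
		return -EIO;
	}
	return copied;
}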
987 * Return the count of just the current iov_iter segment.
989 size_t iov_iter_single_seg_count(const struct iov_iter *i)
991 if (i->nr_segs > 1) {
992 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
993 return min(i->count, i->iov->iov_len - i->iov_offset);
994 if (iov_iter_is_bvec(i))
995 return min(i->count, i->bvec->bv_len - i->iov_offset);
999 EXPORT_SYMBOL(iov_iter_single_seg_count);
1001 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1002 const struct kvec *kvec, unsigned long nr_segs,
1005 WARN_ON(direction & ~(READ | WRITE));
1006 *i = (struct iov_iter){
1007 .iter_type = ITER_KVEC,
1008 .data_source = direction,
1015 EXPORT_SYMBOL(iov_iter_kvec);
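/*
 * Illustrative sketch (not part of this file): wrapping two kernel buffers
 * in an ITER_KVEC, e.g. a header + payload pair, and gathering them into a
 * single contiguous buffer with the ordinary iov_iter primitives.  The
 * helper name is hypothetical.
 */
static size_t example_kvec_gather(void *dst, size_t dst_len,
				  void *hdr, size_t hdr_len,
				  void *body, size_t body_len)
{
	struct kvec vec[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = body, .iov_len = body_len },
	};
	struct iov_iter iter;

	/* WRITE: the kvec segments are the data source for this transfer */
	iov_iter_kvec(&iter, WRITE, vec, 2, hdr_len + body_len);
	return copy_from_iter(dst, dst_len, &iter);
}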
1017 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1018 const struct bio_vec *bvec, unsigned long nr_segs,
1021 WARN_ON(direction & ~(READ | WRITE));
1022 *i = (struct iov_iter){
1023 .iter_type = ITER_BVEC,
1024 .data_source = direction,
1031 EXPORT_SYMBOL(iov_iter_bvec);
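/*
 * Illustrative sketch (not part of this file): describing a page with an
 * ITER_BVEC so its contents can be copied with the ordinary iov_iter
 * primitives (the kmap is handled internally by iterate_bvec above).  The
 * helper name is hypothetical.
 */
static size_t example_read_page_prefix(void *dst, struct page *page, size_t len)
{
	struct bio_vec bv = {
		.bv_page   = page,
		.bv_offset = 0,
		.bv_len    = min_t(unsigned int, len, PAGE_SIZE),
	};
	struct iov_iter iter;

	/* WRITE: the page is the source of the copy */
	iov_iter_bvec(&iter, WRITE, &bv, 1, bv.bv_len);
	return copy_from_iter(dst, bv.bv_len, &iter);
}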
1033 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1034 struct pipe_inode_info *pipe,
1037 BUG_ON(direction != READ);
1038 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1039 *i = (struct iov_iter){
1040 .iter_type = ITER_PIPE,
1041 .data_source = false,
1044 .start_head = pipe->head,
1049 EXPORT_SYMBOL(iov_iter_pipe);
1052 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1053 * @i: The iterator to initialise.
1054 * @direction: The direction of the transfer.
1055 * @xarray: The xarray to access.
1056 * @start: The start file position.
1057 * @count: The size of the I/O buffer in bytes.
1059 * Set up an I/O iterator to either draw data out of the pages attached to an
1060 * inode or to inject data into those pages. The pages *must* be prevented
1061 * from evaporation, either by taking a ref on them or locking them by the
1064 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1065 struct xarray *xarray, loff_t start, size_t count)
1067 BUG_ON(direction & ~1);
1068 *i = (struct iov_iter) {
1069 .iter_type = ITER_XARRAY,
1070 .data_source = direction,
1072 .xarray_start = start,
1077 EXPORT_SYMBOL(iov_iter_xarray);
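/*
 * Illustrative sketch (not part of this file): pointing an ITER_XARRAY at a
 * mapping's page cache, the way netfs-style code hands cached pages to
 * iov_iter users.  Per the comment above, the caller must keep the pages
 * from being evicted; locking is omitted here and the helper name is
 * hypothetical.
 */
static void example_iter_over_pagecache(struct iov_iter *iter,
					struct address_space *mapping,
					loff_t pos, size_t len)
{
	/* READ: data will subsequently be copied *into* the cached pages */
	iov_iter_xarray(iter, READ, &mapping->i_pages, pos, len);
	/* e.g. a later copy_to_iter(src, len, iter) then lands in the pages */
}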
1080 * iov_iter_discard - Initialise an I/O iterator that discards data
1081 * @i: The iterator to initialise.
1082 * @direction: The direction of the transfer.
1083 * @count: The size of the I/O buffer in bytes.
1085 * Set up an I/O iterator that just discards everything that's written to it.
1086 * It's only available as a READ iterator.
1088 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1090 BUG_ON(direction != READ);
1091 *i = (struct iov_iter){
1092 .iter_type = ITER_DISCARD,
1093 .data_source = false,
1098 EXPORT_SYMBOL(iov_iter_discard);
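/*
 * Illustrative sketch (not part of this file): an ITER_DISCARD is handy for
 * throwing away bytes that still have to be "transferred", e.g. skipping
 * the unwanted tail of a message.  Nothing is stored, but each copy advances
 * the count as usual.  The helper name is hypothetical.
 */
static void example_discard_sink(const void *unwanted, size_t len)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, len);
	/* "copies" len bytes: no destination is written, the count just drops */
	WARN_ON(copy_to_iter(unwanted, len, &sink) != len);
}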
1100 static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
1103 size_t size = i->count;
1104 size_t skip = i->iov_offset;
1107 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1108 size_t len = i->iov[k].iov_len - skip;
1114 if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
1124 static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
1127 size_t size = i->count;
1128 unsigned skip = i->iov_offset;
1131 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1132 size_t len = i->bvec[k].bv_len - skip;
1138 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
1149 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
1150 * are aligned to the parameters.
1152 * @i: &struct iov_iter to check
1153 * @addr_mask: bit mask to check against the iov element's addresses
1154 * @len_mask: bit mask to check against the iov element's lengths
1156 * Return: false if any address or length has bits set in the corresponding mask
1158 bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
1161 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1162 return iov_iter_aligned_iovec(i, addr_mask, len_mask);
1164 if (iov_iter_is_bvec(i))
1165 return iov_iter_aligned_bvec(i, addr_mask, len_mask);
1167 if (iov_iter_is_pipe(i)) {
1168 unsigned int p_mask = i->pipe->ring_size - 1;
1169 size_t size = i->count;
1171 if (size & len_mask)
1173 if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
1174 if (i->iov_offset & addr_mask)
1181 if (iov_iter_is_xarray(i)) {
1182 if (i->count & len_mask)
1184 if ((i->xarray_start + i->iov_offset) & addr_mask)
1190 EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
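/*
 * Illustrative sketch (not part of this file): a direct-I/O style gate.  A
 * driver that can only handle 512-byte aligned, 512-byte multiple segments
 * would reject an iterator like this before building a request.  The helper
 * name and the error choice are assumptions.
 */
static int example_dio_check(const struct iov_iter *iter)
{
	/* every segment address and length must have the low 9 bits clear */
	if (!iov_iter_is_aligned(iter, 511, 511))
		return -EINVAL;
	return 0;
}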
1192 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1194 unsigned long res = 0;
1195 size_t size = i->count;
1196 size_t skip = i->iov_offset;
1199 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1200 size_t len = i->iov[k].iov_len - skip;
1202 res |= (unsigned long)i->iov[k].iov_base + skip;
1214 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1217 size_t size = i->count;
1218 unsigned skip = i->iov_offset;
1221 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1222 size_t len = i->bvec[k].bv_len - skip;
1223 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1234 unsigned long iov_iter_alignment(const struct iov_iter *i)
1236 /* iovec and kvec have identical layouts */
1237 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1238 return iov_iter_alignment_iovec(i);
1240 if (iov_iter_is_bvec(i))
1241 return iov_iter_alignment_bvec(i);
1243 if (iov_iter_is_pipe(i)) {
1244 unsigned int p_mask = i->pipe->ring_size - 1;
1245 size_t size = i->count;
1247 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1248 return size | i->iov_offset;
1252 if (iov_iter_is_xarray(i))
1253 return (i->xarray_start + i->iov_offset) | i->count;
1257 EXPORT_SYMBOL(iov_iter_alignment);
1259 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1261 unsigned long res = 0;
1262 unsigned long v = 0;
1263 size_t size = i->count;
1266 if (WARN_ON(!iter_is_iovec(i)))
1269 for (k = 0; k < i->nr_segs; k++) {
1270 if (i->iov[k].iov_len) {
1271 unsigned long base = (unsigned long)i->iov[k].iov_base;
1272 if (v) // if not the first one
1273 res |= base | v; // this start | previous end
1274 v = base + i->iov[k].iov_len;
1275 if (size <= i->iov[k].iov_len)
1277 size -= i->iov[k].iov_len;
1282 EXPORT_SYMBOL(iov_iter_gap_alignment);
1284 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1286 struct page **pages,
1290 struct pipe_inode_info *pipe = i->pipe;
1291 unsigned int p_mask = pipe->ring_size - 1;
1292 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1299 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1307 static ssize_t pipe_get_pages(struct iov_iter *i,
1308 struct page **pages, size_t maxsize, unsigned maxpages,
1311 unsigned int iter_head, npages;
1317 data_start(i, &iter_head, start);
1318 /* Amount of free space: some of this one + all after this one */
1319 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1320 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1322 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1325 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1326 pgoff_t index, unsigned int nr_pages)
1328 XA_STATE(xas, xa, index);
1330 unsigned int ret = 0;
1333 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1334 if (xas_retry(&xas, page))
1337 /* Has the page moved or been split? */
1338 if (unlikely(page != xas_reload(&xas))) {
1343 pages[ret] = find_subpage(page, xas.xa_index);
1344 get_page(pages[ret]);
1345 if (++ret == nr_pages)
1352 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1353 struct page **pages, size_t maxsize,
1354 unsigned maxpages, size_t *_start_offset)
1356 unsigned nr, offset;
1357 pgoff_t index, count;
1358 size_t size = maxsize;
1361 if (!size || !maxpages)
1364 pos = i->xarray_start + i->iov_offset;
1365 index = pos >> PAGE_SHIFT;
1366 offset = pos & ~PAGE_MASK;
1367 *_start_offset = offset;
1370 if (size > PAGE_SIZE - offset) {
1371 size -= PAGE_SIZE - offset;
1372 count += size >> PAGE_SHIFT;
1378 if (count > maxpages)
1381 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1385 return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1388 /* must be done on non-empty ITER_IOVEC one */
1389 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1394 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1395 size_t len = i->iov[k].iov_len - skip;
1401 return (unsigned long)i->iov[k].iov_base + skip;
1403 BUG(); // if it had been empty, we wouldn't get called
1406 /* must be done on non-empty ITER_BVEC one */
1407 static struct page *first_bvec_segment(const struct iov_iter *i,
1408 size_t *size, size_t *start)
1411 size_t skip = i->iov_offset, len;
1413 len = i->bvec->bv_len - skip;
1416 skip += i->bvec->bv_offset;
1417 page = i->bvec->bv_page + skip / PAGE_SIZE;
1418 *start = skip % PAGE_SIZE;
1422 ssize_t iov_iter_get_pages(struct iov_iter *i,
1423 struct page **pages, size_t maxsize, unsigned maxpages,
1428 if (maxsize > i->count)
1432 if (maxsize > MAX_RW_COUNT)
1433 maxsize = MAX_RW_COUNT;
1435 if (likely(iter_is_iovec(i))) {
1436 unsigned int gup_flags = 0;
1439 if (iov_iter_rw(i) != WRITE)
1440 gup_flags |= FOLL_WRITE;
1442 gup_flags |= FOLL_NOFAULT;
1444 addr = first_iovec_segment(i, &maxsize);
1445 *start = addr % PAGE_SIZE;
1447 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1450 res = get_user_pages_fast(addr, n, gup_flags, pages);
1451 if (unlikely(res <= 0))
1453 return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1455 if (iov_iter_is_bvec(i)) {
1458 page = first_bvec_segment(i, &maxsize, start);
1459 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1462 for (int k = 0; k < n; k++)
1463 get_page(*pages++ = page++);
1464 return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1466 if (iov_iter_is_pipe(i))
1467 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1468 if (iov_iter_is_xarray(i))
1469 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1472 EXPORT_SYMBOL(iov_iter_get_pages);
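/*
 * Illustrative sketch (not part of this file): the usual
 * iov_iter_get_pages() calling convention - the returned byte count starts
 * at *start within pages[0], every returned page carries a reference the
 * caller must drop, and the iterator itself is not advanced.  The helper
 * name is hypothetical.
 */
static ssize_t example_pin_first_pages(struct iov_iter *iter,
				       struct page **pages, unsigned maxpages)
{
	size_t start;
	ssize_t len;

	len = iov_iter_get_pages(iter, pages, maxpages * PAGE_SIZE,
				 maxpages, &start);
	if (len <= 0)
		return len;

	/* ... hand the pages to hardware; data begins at offset start in pages[0] ... */

	/* drop the references once the pages are no longer needed */
	for (int k = 0; k < DIV_ROUND_UP(start + len, PAGE_SIZE); k++)
		put_page(pages[k]);
	return len;
}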
1474 static struct page **get_pages_array(size_t n)
1476 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1479 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1480 struct page ***pages, size_t maxsize,
1484 unsigned int iter_head, npages;
1490 data_start(i, &iter_head, start);
1491 /* Amount of free space: some of this one + all after this one */
1492 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1493 n = npages * PAGE_SIZE - *start;
1497 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1498 p = get_pages_array(npages);
1501 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1509 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1510 struct page ***pages, size_t maxsize,
1511 size_t *_start_offset)
1514 unsigned nr, offset;
1515 pgoff_t index, count;
1516 size_t size = maxsize;
1522 pos = i->xarray_start + i->iov_offset;
1523 index = pos >> PAGE_SHIFT;
1524 offset = pos & ~PAGE_MASK;
1525 *_start_offset = offset;
1528 if (size > PAGE_SIZE - offset) {
1529 size -= PAGE_SIZE - offset;
1530 count += size >> PAGE_SHIFT;
1536 p = get_pages_array(count);
1541 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1545 return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1548 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1549 struct page ***pages, size_t maxsize,
1555 if (maxsize > i->count)
1559 if (maxsize > MAX_RW_COUNT)
1560 maxsize = MAX_RW_COUNT;
1562 if (likely(iter_is_iovec(i))) {
1563 unsigned int gup_flags = 0;
1566 if (iov_iter_rw(i) != WRITE)
1567 gup_flags |= FOLL_WRITE;
1569 gup_flags |= FOLL_NOFAULT;
1571 addr = first_iovec_segment(i, &maxsize);
1572 *start = addr % PAGE_SIZE;
1574 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1575 p = get_pages_array(n);
1578 res = get_user_pages_fast(addr, n, gup_flags, p);
1579 if (unlikely(res <= 0)) {
1585 return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1587 if (iov_iter_is_bvec(i)) {
1590 page = first_bvec_segment(i, &maxsize, start);
1591 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1592 *pages = p = get_pages_array(n);
1595 for (int k = 0; k < n; k++)
1596 get_page(*p++ = page++);
1597 return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1599 if (iov_iter_is_pipe(i))
1600 return pipe_get_pages_alloc(i, pages, maxsize, start);
1601 if (iov_iter_is_xarray(i))
1602 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1605 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1607 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1612 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1616 iterate_and_advance(i, bytes, base, len, off, ({
1617 next = csum_and_copy_from_user(base, addr + off, len);
1618 sum = csum_block_add(sum, next, off);
1621 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1627 EXPORT_SYMBOL(csum_and_copy_from_iter);
1629 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1632 struct csum_state *csstate = _csstate;
1635 if (unlikely(iov_iter_is_discard(i))) {
1636 WARN_ON(1); /* for now */
1640 sum = csum_shift(csstate->csum, csstate->off);
1641 if (unlikely(iov_iter_is_pipe(i)))
1642 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1643 else iterate_and_advance(i, bytes, base, len, off, ({
1644 next = csum_and_copy_to_user(addr + off, base, len);
1645 sum = csum_block_add(sum, next, off);
1648 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1651 csstate->csum = csum_shift(sum, csstate->off);
1652 csstate->off += bytes;
1655 EXPORT_SYMBOL(csum_and_copy_to_iter);
1657 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1660 #ifdef CONFIG_CRYPTO_HASH
1661 struct ahash_request *hash = hashp;
1662 struct scatterlist sg;
1665 copied = copy_to_iter(addr, bytes, i);
1666 sg_init_one(&sg, addr, copied);
1667 ahash_request_set_crypt(hash, &sg, NULL, copied);
1668 crypto_ahash_update(hash);
1674 EXPORT_SYMBOL(hash_and_copy_to_iter);
1676 static int iov_npages(const struct iov_iter *i, int maxpages)
1678 size_t skip = i->iov_offset, size = i->count;
1679 const struct iovec *p;
1682 for (p = i->iov; size; skip = 0, p++) {
1683 unsigned offs = offset_in_page(p->iov_base + skip);
1684 size_t len = min(p->iov_len - skip, size);
1688 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1689 if (unlikely(npages > maxpages))
1696 static int bvec_npages(const struct iov_iter *i, int maxpages)
1698 size_t skip = i->iov_offset, size = i->count;
1699 const struct bio_vec *p;
1702 for (p = i->bvec; size; skip = 0, p++) {
1703 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1704 size_t len = min(p->bv_len - skip, size);
1707 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1708 if (unlikely(npages > maxpages))
1714 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1716 if (unlikely(!i->count))
1718 /* iovec and kvec have identical layouts */
1719 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1720 return iov_npages(i, maxpages);
1721 if (iov_iter_is_bvec(i))
1722 return bvec_npages(i, maxpages);
1723 if (iov_iter_is_pipe(i)) {
1724 unsigned int iter_head;
1731 data_start(i, &iter_head, &off);
1732 /* some of this one + all after this one */
1733 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1734 return min(npages, maxpages);
1736 if (iov_iter_is_xarray(i)) {
1737 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1738 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1739 return min(npages, maxpages);
1743 EXPORT_SYMBOL(iov_iter_npages);
1745 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1748 if (unlikely(iov_iter_is_pipe(new))) {
1752 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1754 if (iov_iter_is_bvec(new))
1755 return new->bvec = kmemdup(new->bvec,
1756 new->nr_segs * sizeof(struct bio_vec),
1759 /* iovec and kvec have identical layout */
1760 return new->iov = kmemdup(new->iov,
1761 new->nr_segs * sizeof(struct iovec),
1764 EXPORT_SYMBOL(dup_iter);
1766 static int copy_compat_iovec_from_user(struct iovec *iov,
1767 const struct iovec __user *uvec, unsigned long nr_segs)
1769 const struct compat_iovec __user *uiov =
1770 (const struct compat_iovec __user *)uvec;
1771 int ret = -EFAULT, i;
1773 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1776 for (i = 0; i < nr_segs; i++) {
1780 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1781 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1783 /* check for compat_size_t not fitting in compat_ssize_t .. */
1788 iov[i].iov_base = compat_ptr(buf);
1789 iov[i].iov_len = len;
1798 static int copy_iovec_from_user(struct iovec *iov,
1799 const struct iovec __user *uvec, unsigned long nr_segs)
1803 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1805 for (seg = 0; seg < nr_segs; seg++) {
1806 if ((ssize_t)iov[seg].iov_len < 0)
1813 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1814 unsigned long nr_segs, unsigned long fast_segs,
1815 struct iovec *fast_iov, bool compat)
1817 struct iovec *iov = fast_iov;
1821 * SuS says "The readv() function *may* fail if the iovcnt argument was
1822 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1823 * traditionally returned zero for zero segments, so...
1827 if (nr_segs > UIO_MAXIOV)
1828 return ERR_PTR(-EINVAL);
1829 if (nr_segs > fast_segs) {
1830 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1832 return ERR_PTR(-ENOMEM);
1836 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1838 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1840 if (iov != fast_iov)
1842 return ERR_PTR(ret);
1848 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1849 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1850 struct iov_iter *i, bool compat)
1852 ssize_t total_len = 0;
1856 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1859 return PTR_ERR(iov);
1863 * According to the Single Unix Specification we should return EINVAL if
1864 * an element length is < 0 when cast to ssize_t or if the total length
1865 * would overflow the ssize_t return value of the system call.
1867 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1870 for (seg = 0; seg < nr_segs; seg++) {
1871 ssize_t len = (ssize_t)iov[seg].iov_len;
1873 if (!access_ok(iov[seg].iov_base, len)) {
1880 if (len > MAX_RW_COUNT - total_len) {
1881 len = MAX_RW_COUNT - total_len;
1882 iov[seg].iov_len = len;
1887 iov_iter_init(i, type, iov, nr_segs, total_len);
1896 * import_iovec() - Copy an array of &struct iovec from userspace
1897 * into the kernel, check that it is valid, and initialize a new
1898 * &struct iov_iter iterator to access it.
1900 * @type: One of %READ or %WRITE.
1901 * @uvec: Pointer to the userspace array.
1902 * @nr_segs: Number of elements in userspace array.
1903 * @fast_segs: Number of elements in @iov.
1904 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1905 * on-stack) kernel array.
1906 * @i: Pointer to iterator that will be initialized on success.
1908 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1909 * then this function places %NULL in *@iovp on return. Otherwise, a new
1910 * array will be allocated and the result placed in *@iovp. This means that
1911 * the caller may call kfree() on *@iovp regardless of whether the small
1912 * on-stack array was used or not (and regardless of whether this function
1913 * returns an error or not).
1915 * Return: Negative error code on error, bytes imported on success
1917 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1918 unsigned nr_segs, unsigned fast_segs,
1919 struct iovec **iovp, struct iov_iter *i)
1921 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1922 in_compat_syscall());
1924 EXPORT_SYMBOL(import_iovec);
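/*
 * Illustrative sketch (not part of this file): the canonical import_iovec()
 * call shape used by readv/writev style paths - a small on-stack array for
 * the common case, kfree() of *iov afterwards regardless of which array
 * ended up being used.  The handler invoked in the middle is hypothetical.
 */
static ssize_t example_vectored_write(const struct iovec __user *uvec,
				      unsigned nr_segs,
				      ssize_t (*do_write)(struct iov_iter *from))
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	ret = do_write(&iter);

	kfree(iov);	/* safe whether or not the on-stack array was used */
	return ret;
}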
1926 int import_single_range(int rw, void __user *buf, size_t len,
1927 struct iovec *iov, struct iov_iter *i)
1929 if (len > MAX_RW_COUNT)
1931 if (unlikely(!access_ok(buf, len)))
1934 iov->iov_base = buf;
1936 iov_iter_init(i, rw, iov, 1, len);
1939 EXPORT_SYMBOL(import_single_range);
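/*
 * Illustrative sketch (not part of this file): import_single_range() is the
 * single-buffer counterpart used by plain read(2)/write(2) style paths; the
 * iovec it fills must stay alive for as long as the iterator does.  The
 * helper name and callback are assumptions for illustration.
 */
static ssize_t example_plain_read(void __user *ubuf, size_t len,
				  ssize_t (*do_read)(struct iov_iter *to))
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (ret)
		return ret;
	return do_read(&iter);
}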
1942 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1943 * iov_iter_save_state() was called.
1945 * @i: &struct iov_iter to restore
1946 * @state: state to restore from
1948 * Used after iov_iter_save_state() to restore @i, if operations may
1951 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
1953 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1955 if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1956 !iov_iter_is_kvec(i)))
1958 i->iov_offset = state->iov_offset;
1959 i->count = state->count;
1961 * For the *vec iters, nr_segs + iov is constant - if we increment
1962 * the vec, then we also decrement the nr_segs count. Hence we don't
1963 * need to track both of these, just one is enough and we can deduct
1964 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
1965 * size, so we can just increment the iov pointer as they are unionized.
1966 * ITER_BVEC _may_ be the same size on some archs, but on others it is
1967 * not. Be safe and handle it separately.
1969 BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
1970 if (iov_iter_is_bvec(i))
1971 i->bvec -= state->nr_segs - i->nr_segs;
1973 i->iov -= state->nr_segs - i->nr_segs;
1974 i->nr_segs = state->nr_segs;
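/*
 * Illustrative sketch (not part of this file): the save/restore pairing
 * iov_iter_restore() is written for.  iov_iter_save_state() (the companion
 * helper in <linux/uio.h>) snapshots count/iov_offset/nr_segs; if an
 * operation consumed the iterator but must be retried, iov_iter_restore()
 * rewinds it.  The helper name and retry policy below are assumptions.
 */
static ssize_t example_retry_once(struct iov_iter *iter,
				  ssize_t (*do_io)(struct iov_iter *iter))
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = do_io(iter);
	if (ret == -EAGAIN) {
		/* rewind to the saved position and try again */
		iov_iter_restore(iter, &state);
		ret = do_io(iter);
	}
	return ret;
}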