#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
        while (unlikely(!left && n)) { \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                __v.iov_len -= left; \
#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                skip += __v.iov_len; \
        while (unlikely(n)) { \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
        struct bvec_iter __start; \
        __start.bi_size = n; \
        __start.bi_bvec_done = skip; \
        __start.bi_idx = 0; \
        for_each_bvec(__v, i->bvec, __bi, __start) { \
/*
 * Walk the iterator: invoke step expression I, B or K depending on whether
 * the current segment is an iovec, a bvec or a kvec.
 */
#define iterate_all_kinds(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                struct bvec_iter __bi; \
                iterate_bvec(i, n, v, __bi, skip, (B)) \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
        } else { \
                const struct iovec *iov; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
/* Like iterate_all_kinds(), but also advance the iterator by what was done. */
#define iterate_and_advance(i, n, v, I, B, K) { \
        if (unlikely(i->count < n)) \
                n = i->count; \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec = i->bvec; \
                struct bvec_iter __bi; \
                iterate_bvec(i, n, v, __bi, skip, (B)) \
                i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
                i->nr_segs -= i->bvec - bvec; \
                skip = __bi.bi_bvec_done; \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
                if (skip == kvec->iov_len) { \
                        kvec++; \
                        skip = 0; \
                } \
                i->nr_segs -= kvec - i->kvec; \
        } else { \
                const struct iovec *iov; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
                if (skip == iov->iov_len) { \
                        iov++; \
                        skip = 0; \
                } \
                i->nr_segs -= iov - i->iov; \
        i->iov_offset = skip; \
static int copyout(void __user *to, const void *from, size_t n)
{
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
static int copyin(void *to, const void __user *from, size_t n)
{
        if (access_ok(VERIFY_READ, from, n)) {
                kasan_check_write(to, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);

                while (unlikely(!left && bytes)) {
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);

                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);

                offset = from - kaddr;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        from = kaddr + offset;
        left = copyout(buf, from, copy);

        while (unlikely(!left && bytes)) {
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov_offset = skip;
        return wanted - bytes;
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);

                while (unlikely(!left && bytes)) {
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);

                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);

                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        left = copyin(to, buf, copy);

        while (unlikely(!left && bytes)) {
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov_offset = skip;
        return wanted - bytes;
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        int idx = i->idx;
        int next = pipe->curbuf + pipe->nrbufs;
        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(!pipe->nrbufs))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[idx];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (idx != (next & (pipe->buffers - 1)))
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
        printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
                        pipe->curbuf, pipe->nrbufs, pipe->buffers);
        for (idx = 0; idx < pipe->buffers; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        return false;
}
#else
#define sanity(i) true
#endif
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
        return (idx + 1) & (pipe->buffers - 1);
}
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        buf = &pipe->bufs[idx];

        if (offset == off && buf->page == page) {
                /* merge with the last one */
                i->iov_offset += bytes;

        idx = next_idx(idx, pipe);
        buf = &pipe->bufs[idx];

        if (idx == pipe->curbuf && pipe->nrbufs)
                return 0;

        buf->ops = &page_cache_pipe_buf_ops;
        get_page(buf->page = page);
        buf->offset = offset;
        i->iov_offset = offset + bytes;
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
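/*
 * Usage sketch (illustrative only, not part of this file): a typical write
 * path pre-faults the user buffer with the helper above before entering a
 * region that must not take page faults, then uses the atomic copy
 * primitives defined further down in this file:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 */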
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (uaccess_kernel()) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}
static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
        size_t off = i->iov_offset;
        int idx = i->idx;
        if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
                idx = next_idx(idx, i->pipe);
                off = 0;
        }
        *idxp = idx;
        *offp = off;
}
static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *idxp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;

        if (unlikely(size > i->count))
                size = i->count;

        data_start(i, &idx, &off);
        left -= PAGE_SIZE - off;
        pipe->bufs[idx].len += size;
        pipe->bufs[idx].len = PAGE_SIZE;
        idx = next_idx(idx, pipe);
        while (idx != pipe->curbuf || !pipe->nrbufs) {
                struct page *page = alloc_page(GFP_USER);
                pipe->bufs[idx].ops = &default_pipe_buf_ops;
                pipe->bufs[idx].page = page;
                pipe->bufs[idx].offset = 0;
                if (left <= PAGE_SIZE) {
                        pipe->bufs[idx].len = left;
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
                i->iov_offset = off + chunk;
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;

        if (unlikely(i->type & ITER_PIPE))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);
                n = copy_to_user_mcsafe((__force void *) to, from, n);
        }
        return n;
}
static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
                const char *from, size_t len)
{
        unsigned long ret;
        char *to;

        to = kmap_atomic(page);
        ret = memcpy_mcsafe(to + offset, from, len);
        kunmap_atomic(to);

        return ret;
}
static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off, xfer = 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);

                rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
                                chunk);
                i->iov_offset = off + chunk - rem;
/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

        if (unlikely(i->type & ITER_PIPE))
                return copy_pipe_to_iter_mcsafe(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
                                (from += v.bv_len) - v.bv_len, v.bv_len);
                curr_addr = (unsigned long) from;
                bytes = curr_addr - s_addr - rem;
                rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
                                v.iov_len);
                curr_addr = (unsigned long) from;
                bytes = curr_addr - s_addr - rem;

EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
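/*
 * Illustrative caller pattern (a sketch, not taken from kernel code): since
 * _copy_to_iter_mcsafe() may legitimately return a short count for
 * ITER_KVEC, ITER_BVEC and ITER_PIPE as described above, callers should
 * check the residue instead of assuming the full length was transferred:
 *
 *	size_t copied = _copy_to_iter_mcsafe(src, len, iter);
 *	if (copied != len)
 *		return -EIO;	(poison consumed part of the source)
 */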
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;

        if (iter_is_iovec(i))
                might_fault();
        iterate_all_kinds(i, bytes, v, ({
                if (copyin((to += v.iov_len) - v.iov_len,
                           v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
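/*
 * Illustrative caller (a sketch under the assumption of a pmem-style
 * dax_operations ->copy_from_iter() hook; the prototype below is for
 * orientation only, not an authoritative signature):
 *
 *	static size_t pmem_copy_from_iter(struct dax_device *dax_dev,
 *			pgoff_t pgoff, void *addr, size_t bytes,
 *			struct iov_iter *i)
 *	{
 *		return _copy_from_iter_flushcache(addr, bytes, i);
 *	}
 */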
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
                return true;
        return false;
}
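/*
 * Worked example of the fast path above (illustrative only): for an
 * order-0 page, offset = 0x200 and n = 0x300 give v = 0x500 <= PAGE_SIZE,
 * so the copy is accepted without ever touching compound_head() or
 * compound_order(); only a request spilling past the first page falls
 * through to the slower compound-page check.
 */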
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (likely(!(i->type & ITER_PIPE)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[idx].page, off, chunk);
                i->iov_offset = off + chunk;
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(i->type & ITER_PIPE)) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t off = i->iov_offset;
        int idx = i->idx;
        int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);

        pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
        idx = next_idx(idx, pipe);

        while (pipe->nrbufs > nrbufs) {
                pipe_buf_release(pipe, &pipe->bufs[idx]);
                idx = next_idx(idx, pipe);
                pipe->nrbufs--;
        }
static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;

        struct pipe_buffer *buf;
        size_t off = i->iov_offset, left = size;

        if (off) /* make it relative to the beginning of buffer */
                left += off - pipe->bufs[idx].offset;
        while (1) {
                buf = &pipe->bufs[idx];
                if (left <= buf->len)
                        break;
                left -= buf->len;
                idx = next_idx(idx, pipe);
        }
        i->iov_offset = buf->offset + left;

        /* ... and discard everything past that point */
        pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->type & ITER_PIPE)) {
                pipe_advance(i, size);
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;
                size_t off = i->iov_offset;

                size_t n = off - pipe->bufs[idx].offset;

                if (!unroll && idx == i->start_idx) {

                idx = pipe->buffers - 1;
                off = pipe->bufs[idx].offset + pipe->bufs[idx].len;

                i->iov_offset = off;

        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (i->type & ITER_BVEC) {
                const struct bio_vec *bvec = i->bvec;

                size_t n = (--bvec)->bv_len;

                i->iov_offset = n - unroll;

        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;

                size_t n = (--iov)->iov_len;

                i->iov_offset = n - unroll;
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != ITER_PIPE);
        WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = direction;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
        i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return ~0U;
        }
        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int idx,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        ssize_t n = push_pipe(i, maxsize, &idx, start);

        get_page(*pages++ = pipe->bufs[idx].page);
        idx = next_idx(idx, pipe);
static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;

                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);

EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
        return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        n = npages * PAGE_SIZE - *start;

        npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
        p = get_pages_array(npages);

        n = __pipe_get_pages(i, maxsize, p, idx, start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);

                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                return (res == n ? len : res * PAGE_SIZE) - *start;

                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);

                get_page(*p = v.bv_page);

EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                sum = csum_block_add(sum, next, off);
                err ? v.iov_len : 0;

                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                sum = csum_block_add(sum, next, off);

                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                sum = csum_block_add(sum, next, off);

EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
                                  struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                sum = csum_block_add(sum, next, off);

                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                sum = csum_block_add(sum, next, off);

                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                sum = csum_block_add(sum, next, off);

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);     /* for now */
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_len, 0, &err);
                sum = csum_block_add(sum, next, off);
                err ? v.iov_len : 0;

                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                sum = csum_block_add(sum, next, off);

                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                sum = csum_block_add(sum, next, off);

EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;

                data_start(i, &idx, &off);
                /* some of this one + all after this one */
                npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
                if (npages >= maxpages)
                        return maxpages;
        } else iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;

                if (npages >= maxpages)
                        return maxpages;

                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;

        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (unlikely(new->type & ITER_PIPE)) {
                WARN_ON(1);
                return NULL;
        }
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;

        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);

        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
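/*
 * Usage sketch (illustrative, not part of this file): a readv/writev-style
 * caller lets import_iovec() fall back from a small on-stack array to an
 * allocated one, and unconditionally kfree()s the result as documented
 * above.  do_the_io() below is a placeholder for the actual I/O:
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	struct iovec *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(file, &iter);
 *	kfree(iov);
 *	return ret;
 */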
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;

        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);

        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif
int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
                            int (*f)(struct kvec *vec, void *context),
                            void *context)
{
        struct kvec w;
        int err = -EINVAL;

        iterate_all_kinds(i, bytes, v, -EINVAL, ({
                w.iov_base = kmap(v.bv_page) + v.bv_offset;
                w.iov_len = v.bv_len;
                err = f(&w, context);
                kunmap(v.bv_page);

                err = f(&w, context);})
        )
        return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);