#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

/*
 * The iovec STEP expression must evaluate to the number of bytes it
 * failed to process, so that a short __copy_*_user() terminates the
 * walk early; the kvec and bvec STEPs below cannot fail.
 */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

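/*
 * A minimal sketch of how a consumer is expected to use iterate_all_kinds
 * (hypothetical helper, not part of this file): one STEP expression per
 * flavour. Note the iovec STEP must evaluate to "bytes not processed",
 * hence the trailing ", 0" in its expression:
 *
 *	size_t count_bytes(struct iov_iter *i, size_t bytes)
 *	{
 *		size_t seen = 0;
 *		iterate_all_kinds(i, bytes, v,
 *			(seen += v.iov_len, 0),
 *			seen += v.bv_len,
 *			seen += v.iov_len
 *		)
 *		return seen;
 *	}
 */
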
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (n) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec;		\
			struct bio_vec v;			\
			iterate_bvec(i, n, v, bvec, skip, (B))	\
			if (skip == bvec->bv_len) {		\
				bvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

/* Slow path for user iovecs: copy out of a page, tolerating faults. */
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/* Slow path for user iovecs: copy into a page, tolerating faults. */
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
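/*
 * A minimal usage sketch (hypothetical write path, not part of this file):
 * prefault the user memory before taking page locks, so the atomic copy
 * later on is unlikely to fault while we cannot sleep:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	... lock the page, then iov_iter_copy_from_user_atomic(...) ...
 */
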
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
			0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
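/*
 * A minimal usage sketch (hypothetical caller; "ubuf" and "len" are
 * illustrative): wrap a single user buffer for a read(2)-style operation,
 * i.e. data will be copied *to* user space:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */
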
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
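/*
 * Sketch (hypothetical caller; "kbuf" is illustrative): move a kernel
 * buffer through an iterator regardless of whether it is backed by user
 * iovecs, kvecs or bio_vecs. Both helpers return the number of bytes
 * actually copied and advance the iterator by that much:
 *
 *	copied = copy_to_iter(kbuf, len, &iter);	kernel -> iter
 *	copied = copy_from_iter(kbuf, len, &iter);	iter -> kernel
 */
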
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
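/*
 * Sketch (hypothetical read path; "page" and "offset" are illustrative):
 * after finding the page-cache page for the current file position, hand
 * its contents to whatever the iterator points at:
 *
 *	copied = copy_page_to_iter(page, offset, bytes, &iter);
 *	if (copied < bytes)
 *		... short copy: the user buffer faulted ...
 */
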
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
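/*
 * Sketch of the usual buffered-write pattern (cf. generic_perform_write):
 * copy while atomic, then advance the iterator by what was actually
 * copied rather than by what was asked for:
 *
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 */
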
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
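/*
 * Sketch (hypothetical caller; "page" is illustrative): iterating over a
 * kernel page instead of user memory, e.g. in a loop-device-style READ:
 *
 *	struct bio_vec bvec = {
 *		.bv_page = page, .bv_len = PAGE_SIZE, .bv_offset = 0 };
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
 */
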
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
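/*
 * Sketch (hypothetical direct-IO caller): pin up to "maxpages" pages
 * backing the iterator. *start receives the offset into the first page;
 * the caller owns a reference on each page and must put_page() them,
 * typically advancing the iterator by the number of bytes covered:
 *
 *	ssize_t got = iov_iter_get_pages(&iter, pages, maxsize,
 *					 ARRAY_SIZE(pages), &start);
 *	if (got > 0)
 *		iov_iter_advance(&iter, got);
 */
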
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
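/*
 * Sketch of the intended calling convention (cf. the readv/writev paths):
 * *iov starts out pointing at a stack array of UIO_FASTIOV elements; on
 * return it is either NULL (the stack array was enough) or a heap copy,
 * so kfree(*iov) afterwards is always safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, uvector, nr_segs, UIO_FASTIOV,
 *			       &iov, &iter);
 *	if (!ret) {
 *		... consume iter ...
 *		kfree(iov);
 *	}
 */
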
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
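/*
 * Sketch (hypothetical caller; "ubuf" is illustrative): the single-buffer
 * analogue of import_iovec(), e.g. for a plain read(2)/write(2) path:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	if (!import_single_range(WRITE, ubuf, len, &iov, &iter))
 *		... consume iter with copy_from_iter() etc. ...
 */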