#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                       \
        __start.bi_size = n;                            \
        __start.bi_bvec_done = skip;                    \
        __start.bi_idx = 0;                             \
        for_each_bvec(__v, i->bvec, __bi, __start) {    \
                if (!__v.bv_len)                        \
                        continue;                       \
                (void)(STEP);                           \
        }                                               \
}

#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        if (likely(n)) {                                        \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                }                                               \
        }                                                       \
}

#define iterate_and_advance(i, n, v, I, B, K) {                 \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                        skip += n;                              \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}
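
/*
 * Illustrative sketch, not part of this file: a caller of
 * iterate_and_advance() supplies one STEP expression per iterator
 * flavour, in the order iovec (I), bvec (B), kvec (K).  An iovec STEP
 * must evaluate to the number of bytes it failed to process, so a pure
 * side effect is written as (expr, 0).  The "seen" counter below is
 * hypothetical:
 *
 *      size_t seen = 0;
 *      iterate_and_advance(i, bytes, v,
 *              (seen += v.iov_len, 0),
 *              seen += v.bv_len,
 *              seen += v.iov_len
 *      )
 */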

static int copyout(void __user *to, const void *from, size_t n)
{
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (access_ok(VERIFY_READ, from, n)) {
                kasan_check_write(to, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = copyout(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = copyin(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        int idx = i->idx;
        int next = pipe->curbuf + pipe->nrbufs;
        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(!pipe->nrbufs))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[idx];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (idx != (next & (pipe->buffers - 1)))
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
        printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
                        pipe->curbuf, pipe->nrbufs, pipe->buffers);
        for (idx = 0; idx < pipe->buffers; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
        return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        size_t off;
        int idx;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        idx = i->idx;
        buf = &pipe->bufs[idx];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                idx = next_idx(idx, pipe);
                buf = &pipe->bufs[idx];
        }
        if (idx == pipe->curbuf && pipe->nrbufs)
                return 0;
        pipe->nrbufs++;
        buf->ops = &page_cache_pipe_buf_ops;
        get_page(buf->page = page);
        buf->offset = offset;
        buf->len = bytes;
        i->iov_offset = offset + bytes;
        i->idx = idx;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (e.g.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                        return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
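
/*
 * Usage sketch (assumed caller, mirroring the generic_perform_write()
 * pattern): fault the user pages in up front so that the actual copy,
 * done later under a page lock, cannot take a page fault:
 *
 *      if (iov_iter_fault_in_readable(i, bytes))
 *              return -EFAULT;
 *      ... lock the pagecache page, then copy with
 *          iov_iter_copy_from_user_atomic() ...
 */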

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        direction &= READ | WRITE;

        /* It will get better.  Eventually... */
        if (uaccess_kernel()) {
                i->type = ITER_KVEC | direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = ITER_IOVEC | direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
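
/*
 * Example (hypothetical read-side caller): wrap a single user buffer
 * in an iterator.  "ubuf" and "len" are assumed to come from the
 * syscall; READ means the iterator is the destination of a read:
 *
 *      struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, READ, &iov, 1, len);
 */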

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
        size_t off = i->iov_offset;
        int idx = i->idx;
        if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
                idx = next_idx(idx, i->pipe);
                off = 0;
        }
        *idxp = idx;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *idxp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t off;
        int idx;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &idx, &off);
        *idxp = idx;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[idx].len += size;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        while (idx != pipe->curbuf || !pipe->nrbufs) {
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;
                pipe->nrbufs++;
                pipe->bufs[idx].ops = &default_pipe_buf_ops;
                pipe->bufs[idx].page = page;
                pipe->bufs[idx].offset = 0;
                if (left <= PAGE_SIZE) {
                        pipe->bufs[idx].len = left;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                left -= PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
        }
        i->count -= bytes;
        return bytes;
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                __wsum *csum, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, r;
        size_t off = 0;
        __wsum sum = *csum, next;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &r);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), r = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
                char *p = kmap_atomic(pipe->bufs[idx].page);
                next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
                sum = csum_block_add(sum, next, off);
                kunmap_atomic(p);
                i->idx = idx;
                i->iov_offset = r + chunk;
                n -= chunk;
                off += chunk;
                addr += chunk;
        }
        i->count -= bytes;
        *csum = sum;
        return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
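
/*
 * Usage sketch: copying a kernel buffer out to whatever the iterator
 * describes (user memory, pages, kvecs or a pipe), via the checked
 * copy_to_iter() wrapper from <linux/uio.h>.  A short return means the
 * copy faulted part-way; "kbuf" and "len" are hypothetical locals:
 *
 *      copied = copy_to_iter(kbuf, len, iter);
 *      if (copied != len)
 *              return -EFAULT;
 */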

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);
                n = copy_to_user_mcsafe((__force void *) to, from, n);
        }
        return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
                const char *from, size_t len)
{
        unsigned long ret;
        char *to;

        to = kmap_atomic(page);
        ret = memcpy_mcsafe(to + offset, from, len);
        kunmap_atomic(to);

        return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off, xfer = 0;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                unsigned long rem;

                rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
                                chunk);
                i->idx = idx;
                i->iov_offset = off + chunk - rem;
                xfer += chunk - rem;
                if (rem)
                        break;
                n -= chunk;
                addr += chunk;
        }
        i->count -= xfer;
        return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and the typical
 * _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter_mcsafe(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                ({
                rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                }),
                ({
                rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
                                v.iov_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                })
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
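
/*
 * Usage sketch, assuming CONFIG_ARCH_HAS_UACCESS_MCSAFE: a pmem-style
 * reader treats a short copy as a media error rather than retrying
 * byte-by-byte ("kaddr" and "len" are hypothetical):
 *
 *      copied = _copy_to_iter_mcsafe(kaddr, len, iter);
 *      if (copied != len)
 *              ... short copy: the source hit a poisoned region ...
 */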

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;

        if (iter_is_iovec(i))
                might_fault();
        iterate_all_kinds(i, bytes, v, ({
                if (copyin((to += v.iov_len) - v.iov_len,
                                      v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
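
/*
 * Usage sketch: the _full variant is all-or-nothing, which suits
 * fixed-size headers; on failure the iterator is left unadvanced.
 * "hdr" is a hypothetical request header, read via the checked
 * copy_from_iter_full() wrapper from <linux/uio.h>:
 *
 *      if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *              return -EFAULT;
 */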

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees that all data is
 * flushed for all iterator types. _copy_from_iter_nocache() only
 * attempts to bypass the cache for the ITER_IOVEC case, and on some
 * archs may use instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
                        v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
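
/*
 * Usage sketch, assuming CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE: writes
 * headed for persistent memory go through the flushing variant so no
 * dirty lines are left in the CPU cache ("pmem_addr" is hypothetical):
 *
 *      copied = _copy_from_iter_flushcache(pmem_addr, len, iter);
 */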

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head = compound_head(page);
        size_t v = n + offset + page_address(page) - page_address(head);

        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
                return true;
        WARN_ON(1);
        return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (unlikely(iov_iter_is_discard(i)))
                return bytes;
        else if (likely(!iov_iter_is_pipe(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
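
/*
 * Usage sketch (assumed pagecache read actor): hand a pagecache page
 * to whatever the iterator describes; the helper picks the iovec,
 * pipe or kmap-based path by itself:
 *
 *      copied = copy_page_to_iter(page, offset, chunk, iter);
 *      if (copied < chunk)
 *              ... short copy: stop and report what was done ...
 */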

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;

        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[idx].page, off, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
        }
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
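
/*
 * Usage sketch: a read over a hole in a file can satisfy the iterator
 * with zeroes instead of data ("hole_len" is hypothetical):
 *
 *      zeroed = iov_iter_zero(hole_len, iter);
 *      if (zeroed < hole_len)
 *              return -EFAULT;
 */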

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (pipe->nrbufs) {
                size_t off = i->iov_offset;
                int idx = i->idx;
                int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
                if (off) {
                        pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
                        idx = next_idx(idx, pipe);
                        nrbufs++;
                }
                while (pipe->nrbufs > nrbufs) {
                        pipe_buf_release(pipe, &pipe->bufs[idx]);
                        idx = next_idx(idx, pipe);
                        pipe->nrbufs--;
                }
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;
        if (size) {
                struct pipe_buffer *buf;
                size_t off = i->iov_offset, left = size;
                int idx = i->idx;
                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[idx].offset;
                while (1) {
                        buf = &pipe->bufs[idx];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        idx = next_idx(idx, pipe);
                }
                i->idx = idx;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                pipe_advance(i, size);
                return;
        }
        if (unlikely(iov_iter_is_discard(i))) {
                i->count -= size;
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                int idx = i->idx;
                size_t off = i->iov_offset;
                while (1) {
                        size_t n = off - pipe->bufs[idx].offset;
                        if (unroll < n) {
                                off -= unroll;
                                break;
                        }
                        unroll -= n;
                        if (!unroll && idx == i->start_idx) {
                                off = 0;
                                break;
                        }
                        if (!idx--)
                                idx = pipe->buffers - 1;
                        off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
                }
                i->iov_offset = off;
                i->idx = idx;
                pipe_truncate(i);
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
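
/*
 * Usage sketch: a caller that consumed part of the iterator and then
 * failed can wind it back so the data is not lost.  "do_io" is a
 * hypothetical helper; iov_iter_count() is the real accessor from
 * <linux/uio.h>:
 *
 *      size_t before = iov_iter_count(iter);
 *
 *      ret = do_io(iter);
 *      if (ret < 0)
 *              iov_iter_revert(iter, before - iov_iter_count(iter));
 */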

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        if (unlikely(iov_iter_is_discard(i)))
                return i->count;
        else if (iov_iter_is_bvec(i))
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_KVEC | (direction & (READ | WRITE));
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
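
/*
 * Example (illustrative): kernel-space buffers are described with
 * kvecs; WRITE means the buffer is the source of the data ("kbuf" and
 * "len" are assumed locals):
 *
 *      struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_kvec(&iter, WRITE, &kv, 1, len);
 */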

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_BVEC | (direction & (READ | WRITE));
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
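
/*
 * Example (illustrative): page-based buffers use bio_vecs, e.g. for
 * zero-copy paths in network or block code ("page" and "len" are
 * assumed to be set up by the caller):
 *
 *      struct bio_vec bv = { .bv_page = page, .bv_len = len,
 *                            .bv_offset = 0 };
 *      struct iov_iter iter;
 *
 *      iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 */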

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = ITER_PIPE | READ;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
        i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        i->type = ITER_DISCARD | READ;
        i->count = count;
        i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
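
/*
 * Example (illustrative): drain "len" bytes from a source without
 * storing them anywhere, e.g. to skip over unwanted socket data:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_discard(&iter, READ, len);
 *      ... pass &iter to the normal receive path; copies into it
 *          succeed and the data is dropped ...
 */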

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i))) {
                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
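
/*
 * Usage sketch: direct I/O paths reject buffers that do not meet the
 * device's alignment.  "blocksize_mask" is a hypothetical mask such as
 * (1 << blkbits) - 1:
 *
 *      if (iov_iter_alignment(iter) & blocksize_mask)
 *              return -EINVAL;
 */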

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return ~0U;
        }

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int idx,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        ssize_t n = push_pipe(i, maxsize, &idx, start);
        if (!n)
                return -EFAULT;

        maxsize = n;
        n += *start;
        while (n > 0) {
                get_page(*pages++ = pipe->bufs[idx].page);
                idx = next_idx(idx, pipe);
                n -= PAGE_SIZE;
        }

        return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned npages;
        size_t capacity;
        int idx;

        if (!maxsize)
                return 0;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        if (unlikely(iov_iter_is_discard(i)))
                return -EFAULT;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
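
/*
 * Usage sketch: pin up to "maxpages" pages backing the iterator, e.g.
 * to build a bio around user memory.  The return value is in bytes,
 * "offset" receives the offset into the first page, and the caller
 * must advance the iterator itself and put_page() the pages when the
 * I/O completes ("pages", "maxpages" and "offset" are assumed locals):
 *
 *      ssize_t got = iov_iter_get_pages(iter, pages, LONG_MAX,
 *                                       maxpages, &offset);
 *      if (got <= 0)
 *              return got ? got : -EFAULT;
 */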
1293
1294 static struct page **get_pages_array(size_t n)
1295 {
1296         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1297 }
1298
1299 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1300                    struct page ***pages, size_t maxsize,
1301                    size_t *start)
1302 {
1303         struct page **p;
1304         ssize_t n;
1305         int idx;
1306         int npages;
1307
1308         if (!maxsize)
1309                 return 0;
1310
1311         if (!sanity(i))
1312                 return -EFAULT;
1313
1314         data_start(i, &idx, start);
1315         /* some of this one + all after this one */
1316         npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
1317         n = npages * PAGE_SIZE - *start;
1318         if (maxsize > n)
1319                 maxsize = n;
1320         else
1321                 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1322         p = get_pages_array(npages);
1323         if (!p)
1324                 return -ENOMEM;
1325         n = __pipe_get_pages(i, maxsize, p, idx, start);
1326         if (n > 0)
1327                 *pages = p;
1328         else
1329                 kvfree(p);
1330         return n;
1331 }
1332
1333 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1334                    struct page ***pages, size_t maxsize,
1335                    size_t *start)
1336 {
1337         struct page **p;
1338
1339         if (maxsize > i->count)
1340                 maxsize = i->count;
1341
1342         if (unlikely(iov_iter_is_pipe(i)))
1343                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1344         if (unlikely(iov_iter_is_discard(i)))
1345                 return -EFAULT;
1346
1347         iterate_all_kinds(i, maxsize, v, ({
1348                 unsigned long addr = (unsigned long)v.iov_base;
1349                 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1350                 int n;
1351                 int res;
1352
1353                 addr &= ~(PAGE_SIZE - 1);
1354                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1355                 p = get_pages_array(n);
1356                 if (!p)
1357                         return -ENOMEM;
1358                 res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
1359                 if (unlikely(res < 0)) {
1360                         kvfree(p);
1361                         return res;
1362                 }
1363                 *pages = p;
1364                 return (res == n ? len : res * PAGE_SIZE) - *start;
1365         0;}),({
1366                 /* can't be more than PAGE_SIZE */
1367                 *start = v.bv_offset;
1368                 *pages = p = get_pages_array(1);
1369                 if (!p)
1370                         return -ENOMEM;
1371                 get_page(*p = v.bv_page);
1372                 return v.bv_len;
1373         }),({
1374                 return -EFAULT;
1375         })
1376         )
1377         return 0;
1378 }
1379 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1380
1381 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1382                                struct iov_iter *i)
1383 {
1384         char *to = addr;
1385         __wsum sum, next;
1386         size_t off = 0;
1387         sum = *csum;
1388         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1389                 WARN_ON(1);
1390                 return 0;
1391         }
1392         iterate_and_advance(i, bytes, v, ({
1393                 int err = 0;
1394                 next = csum_and_copy_from_user(v.iov_base,
1395                                                (to += v.iov_len) - v.iov_len,
1396                                                v.iov_len, 0, &err);
1397                 if (!err) {
1398                         sum = csum_block_add(sum, next, off);
1399                         off += v.iov_len;
1400                 }
1401                 err ? v.iov_len : 0;
1402         }), ({
1403                 char *p = kmap_atomic(v.bv_page);
1404                 next = csum_partial_copy_nocheck(p + v.bv_offset,
1405                                                  (to += v.bv_len) - v.bv_len,
1406                                                  v.bv_len, 0);
1407                 kunmap_atomic(p);
1408                 sum = csum_block_add(sum, next, off);
1409                 off += v.bv_len;
1410         }),({
1411                 next = csum_partial_copy_nocheck(v.iov_base,
1412                                                  (to += v.iov_len) - v.iov_len,
1413                                                  v.iov_len, 0);
1414                 sum = csum_block_add(sum, next, off);
1415                 off += v.iov_len;
1416         })
1417         )
1418         *csum = sum;
1419         return bytes;
1420 }
1421 EXPORT_SYMBOL(csum_and_copy_from_iter);
1422
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

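/*
 * The mirror image of csum_and_copy_from_iter(): copy from @addr into
 * the iterator while folding a checksum of the data into *@csum.  Pipe
 * iterators are handled by csum_and_copy_to_pipe_iter(); discard
 * iterators are rejected.  Returns the number of bytes copied, which
 * may be short if a copy to a user-space segment faults.
 */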
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

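/*
 * Copy from @addr into the iterator and feed the copied data into the
 * ahash request behind @hashp via a one-entry scatterlist.  Returns the
 * number of bytes copied; note that the return value of
 * crypto_ahash_update() is ignored here.
 */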
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

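/*
 * Report how many pages the first i->count bytes of the iterator span,
 * capped at @maxpages.  Typically used to size a page array before
 * calling iov_iter_get_pages() or its _alloc variant.
 *
 * A sketch of typical use (the bio-sizing context is illustrative):
 *
 *	int npages = iov_iter_npages(iter, BIO_MAX_PAGES);
 *
 *	bio = bio_alloc(GFP_KERNEL, npages);
 */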
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

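/*
 * Duplicate an iterator: *@new becomes a copy of *@old with its own
 * kmemdup()ed segment array, so it stays valid after the original
 * array goes away.  Returns the new segment array (to be kfree()d by
 * the caller once the copy is done with), or NULL on allocation
 * failure or for pipe/discard iterators.
 */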
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

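/*
 * A minimal sketch of the usual import_iovec() calling pattern; the
 * surrounding handler and the do_stuff() consumer are illustrative,
 * not part of this file:
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	struct iovec *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_stuff(&iter);
 *	kfree(iov);
 *	return ret;
 *
 * The kfree() is always safe: *iov is NULL whenever the on-stack array
 * was used.
 */
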
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

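/*
 * Build a single-segment iterator over the user buffer @buf, using the
 * caller-supplied @iov as storage for the one segment.  @len is clamped
 * to MAX_RW_COUNT and the range is checked with access_ok().  Returns 0
 * on success or -EFAULT if the buffer is not a valid user address range.
 */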
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

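/*
 * Walk the first @bytes of a kernel-backed (kvec or bvec) iterator,
 * handing each segment to @f as a kvec; bvec pages are kmap()ed around
 * the call.  Returns 0 when @bytes is zero, otherwise the value @f
 * returned for the last segment it saw; user-backed iovec iterators
 * are not supported and yield -EINVAL.
 *
 * A sketch of a checksumming callback; sum_range() and struct sum_ctx
 * are illustrative:
 *
 *	static int sum_range(struct kvec *vec, void *context)
 *	{
 *		struct sum_ctx *ctx = context;
 *
 *		ctx->sum = csum_partial(vec->iov_base, vec->iov_len,
 *					ctx->sum);
 *		return 0;
 *	}
 *
 *	err = iov_iter_for_each_range(iter, bytes, sum_range, &ctx);
 */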
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);