iov_iter_get_pages(): sanity-check arguments
lib/iov_iter.c
// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {          \
        size_t __maybe_unused off = 0;                          \
        len = n;                                                \
        base = __p + i->iov_offset;                             \
        len -= (STEP);                                          \
        i->iov_offset += len;                                   \
        n = len;                                                \
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {        \
        size_t off = 0;                                         \
        size_t skip = i->iov_offset;                            \
        do {                                                    \
                len = min(n, __p->iov_len - skip);              \
                if (likely(len)) {                              \
                        base = __p->iov_base + skip;            \
                        len -= (STEP);                          \
                        off += len;                             \
                        skip += len;                            \
                        n -= len;                               \
                        if (skip < __p->iov_len)                \
                                break;                          \
                }                                               \
                __p++;                                          \
                skip = 0;                                       \
        } while (n);                                            \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {           \
        size_t off = 0;                                         \
        unsigned skip = i->iov_offset;                          \
        while (n) {                                             \
                unsigned offset = p->bv_offset + skip;          \
                unsigned left;                                  \
                void *kaddr = kmap_local_page(p->bv_page +      \
                                        offset / PAGE_SIZE);    \
                base = kaddr + offset % PAGE_SIZE;              \
                len = min(min(n, (size_t)(p->bv_len - skip)),   \
                     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
                left = (STEP);                                  \
                kunmap_local(kaddr);                            \
                len -= left;                                    \
                off += len;                                     \
                skip += len;                                    \
                if (skip == p->bv_len) {                        \
                        skip = 0;                               \
                        p++;                                    \
                }                                               \
                n -= len;                                       \
                if (left)                                       \
                        break;                                  \
        }                                                       \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_xarray(i, n, base, len, __off, STEP) {          \
        __label__ __out;                                        \
        size_t __off = 0;                                       \
        struct folio *folio;                                    \
        loff_t start = i->xarray_start + i->iov_offset;         \
        pgoff_t index = start / PAGE_SIZE;                      \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        len = PAGE_SIZE - offset_in_page(start);                \
        rcu_read_lock();                                        \
        xas_for_each(&xas, folio, ULONG_MAX) {                  \
                unsigned left;                                  \
                size_t offset;                                  \
                if (xas_retry(&xas, folio))                     \
                        continue;                               \
                if (WARN_ON(xa_is_value(folio)))                \
                        break;                                  \
                if (WARN_ON(folio_test_hugetlb(folio)))         \
                        break;                                  \
                offset = offset_in_folio(folio, start + __off); \
                while (offset < folio_size(folio)) {            \
                        base = kmap_local_folio(folio, offset); \
                        len = min(n, len);                      \
                        left = (STEP);                          \
                        kunmap_local(base);                     \
                        len -= left;                            \
                        __off += len;                           \
                        n -= len;                               \
                        if (left || n == 0)                     \
                                goto __out;                     \
                        offset += len;                          \
                        len = PAGE_SIZE;                        \
                }                                               \
        }                                                       \
__out:                                                          \
        rcu_read_unlock();                                      \
        i->iov_offset += __off;                                 \
        n = __off;                                              \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {     \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (likely(n)) {                                        \
                if (likely(iter_is_ubuf(i))) {                  \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_buf(i, n, base, len, off,       \
                                                i->ubuf, (I))   \
                } else if (likely(iter_is_iovec(i))) {          \
                        const struct iovec *iov = i->iov;       \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
                        size_t len;                             \
                        iterate_bvec(i, n, base, len, off,      \
                                                bvec, (K))      \
                        i->nr_segs -= bvec - i->bvec;           \
                        i->bvec = bvec;                         \
                } else if (iov_iter_is_kvec(i)) {               \
                        const struct kvec *kvec = i->kvec;      \
                        void *base;                             \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                kvec, (K))      \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (iov_iter_is_xarray(i)) {             \
                        void *base;                             \
                        size_t len;                             \
                        iterate_xarray(i, n, base, len, off,    \
                                                        (K))    \
                }                                               \
                i->count -= n;                                  \
        }                                                       \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
        __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
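
/*
 * Usage sketch (illustrative only): the I and K arguments are expressions
 * evaluated once per segment with 'base', 'len' and 'off' in scope, each
 * returning the number of bytes it failed to process.  A hypothetical
 * minimal kernel-to-iterator copy, essentially what _copy_to_iter() below
 * expands to, would look like:
 *
 *	size_t copy_all_to_iter(const void *addr, size_t bytes,
 *				struct iov_iter *i)
 *	{
 *		iterate_and_advance(i, bytes, base, len, off,
 *			copyout(base, addr + off, len),	// user segments
 *			memcpy(base, addr + off, len)	// kernel segments
 *		)
 *		return bytes;
 *	}
 */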

static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user(to, from, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
                                           unsigned int slot)
{
        return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->last_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = pipe_buf(pipe, i_head);
                if (unlikely(p->offset + p->len != abs(i->last_offset)))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
        struct page *page = alloc_page(GFP_USER);
        if (page) {
                struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
                *buf = (struct pipe_buffer) {
                        .ops = &default_pipe_buf_ops,
                        .page = page,
                        .offset = 0,
                        .len = size
                };
        }
        return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
                        unsigned int offset, unsigned int size)
{
        struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
        *buf = (struct pipe_buffer) {
                .ops = &page_cache_pipe_buf_ops,
                .page = page,
                .offset = offset,
                .len = size
        };
        get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
        if (buf->ops == &default_pipe_buf_ops)
                return buf->len;        // buf->offset is 0 for those
        else
                return -(buf->offset + buf->len);
}
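
/*
 * Worked example of the last_offset convention: pushing a fresh 100-byte
 * anon buffer leaves i->last_offset == +100 (buf->offset is 0, so that is
 * the first free byte); splicing a page-cache page at offset 512 with
 * len 256 leaves i->last_offset == -(512 + 256) == -768.  Either way
 * abs(last_offset) is the end of the data, and the sign records whether
 * the tail buffer is anon (appendable) or page-cache (not).
 */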

static struct page *append_pipe(struct iov_iter *i, size_t size,
                                unsigned int *off)
{
        struct pipe_inode_info *pipe = i->pipe;
        int offset = i->last_offset;
        struct pipe_buffer *buf;
        struct page *page;

        if (offset > 0 && offset < PAGE_SIZE) {
                // some space in the last buffer; add to it
                buf = pipe_buf(pipe, pipe->head - 1);
                size = min_t(size_t, size, PAGE_SIZE - offset);
                buf->len += size;
                i->last_offset += size;
                i->count -= size;
                *off = offset;
                return buf->page;
        }
        // OK, we need a new buffer
        *off = 0;
        size = min_t(size_t, size, PAGE_SIZE);
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return NULL;
        page = push_anon(pipe, size);
        if (!page)
                return NULL;
        i->head = pipe->head - 1;
        i->last_offset = size;
        i->count -= size;
        return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int head = pipe->head;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        if (offset && i->last_offset == -offset) { // could we merge it?
                struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
                if (buf->page == page) {
                        buf->len += bytes;
                        i->last_offset -= bytes;
                        i->count -= bytes;
                        return bytes;
                }
        }
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return 0;

        push_page(pipe, page, offset, bytes);
        i->last_offset = -(offset + bytes);
        i->head = head;
        i->count -= bytes;
        return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_readable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_readable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
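
/*
 * Usage sketch (illustrative; 'buf' and 'bytes' are hypothetical): write
 * paths pair this with a copy attempt and retry after faulting pages in,
 * roughly as generic_perform_write() does:
 *
 *	while (bytes) {
 *		if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *			return -EFAULT;	// nothing could be faulted in
 *		copied = copy_from_iter(buf, bytes, i);	// may still be short
 *		buf += copied;
 *		bytes -= copied;
 *	}
 */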

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_safe_writeable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                .nofault = false,
                .user_backed = true,
                .data_source = direction,
                .iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_init);
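
/*
 * Example (a minimal sketch; 'ubuf' and 'len' are hypothetical caller
 * values): wrapping a single userspace buffer for a read(2)-style path.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	// READ: userspace is the destination, so the iterator is now
 *	// ready for copy_to_iter() and friends.
 */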

// returns the offset in the partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
        struct pipe_inode_info *pipe = i->pipe;
        int used = pipe->head - pipe->tail;
        int off = i->last_offset;

        *npages = max((int)pipe->max_usage - used, 0);

        if (off > 0 && off < PAGE_SIZE) { // anon and not full
                (*npages)++;
                return off;
        }
        return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                if (!page)
                        return bytes - n;
                memcpy_to_page(page, off, addr, chunk);
                addr += chunk;
        }
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct iov_iter *i, __wsum *sump)
{
        __wsum sum = *sump;
        size_t off = 0;
        unsigned int chunk, r;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &r);
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - r);
                p = kmap_local_page(page);
                sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
                kunmap_local(p);
                off += chunk;
                bytes -= chunk;
        }
        *sump = sum;
        return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyout(base, addr + off, len),
                memcpy(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
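
/*
 * Usage sketch (illustrative): the return value is the number of bytes
 * actually copied, which may be short if a destination page faults.  A
 * hypothetical driver read path:
 *
 *	copied = _copy_to_iter(kbuf, len, to);
 *	if (copied != len)
 *		return copied ? copied : -EFAULT;
 */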

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        size_t xfer = 0;
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &off);
                unsigned long rem;
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - off);
                p = kmap_local_page(page);
                rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
                chunk -= rem;
                kunmap_local(p);
                xfer += chunk;
                bytes -= chunk;
                if (rem) {
                        iov_iter_revert(i, rem);
                        break;
                }
        }
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        __iterate_and_advance(i, bytes, base, len, off,
                copyout_mc(base, addr + off, len),
                copy_mc_to_kernel(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
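
/*
 * Usage sketch (illustrative): since this variant may legitimately return
 * short on a machine check, callers treat the shortfall as data loss
 * rather than retrying byte-by-byte.  A hypothetical dax read:
 *
 *	copied = _copy_mc_to_iter(kaddr, len, iter);
 *	if (copied != len)
 *		return copied ? copied : -EIO;	// poison hit, don't retry
 */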

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyin(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_inatomic_nocache(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_flushcache(addr + off, base, len),
                memcpy_flushcache(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (page_size(head))))
                return true;
        WARN_ON(1);
        return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        } else {
                void *kaddr = kmap_local_page(page);
                size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
                kunmap_local(kaddr);
                return wanted;
        }
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                size_t n = __copy_page_to_iter(page, offset,
                                min(bytes, (size_t)PAGE_SIZE - offset), i);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
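
/*
 * Usage sketch (illustrative; 'pos' and 'chunk' are hypothetical): a
 * filesystem read path feeding a page-cache page to the caller's
 * iterator, tolerating a short copy:
 *
 *	copied = copy_page_to_iter(page, offset_in_page(pos), chunk, iter);
 *	pos += copied;
 *	if (copied < chunk)
 *		break;	// destination faulted or pipe was full
 */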

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (page_copy_sane(page, offset, bytes)) {
                void *kaddr = kmap_local_page(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_local(kaddr);
                return wanted;
        }
        return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        unsigned int chunk, off;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                char *p;

                if (!page)
                        return bytes - n;
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                p = kmap_local_page(page);
                memset(p + off, 0, chunk);
                kunmap_local(p);
        }
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, base, len, count,
                clear_user(base, len),
                memset(base, 0, len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
                                  struct iov_iter *i)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                copyin(p + off, base, len),
                memcpy(p + off, base, len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        int off = i->last_offset;

        if (!off && !size) {
                pipe_discard_from(pipe, i->start_head); // discard everything
                return;
        }
        i->count -= size;
        while (1) {
                struct pipe_buffer *buf = pipe_buf(pipe, i->head);
                if (off) /* make it relative to the beginning of buffer */
                        size += abs(off) - buf->offset;
                if (size <= buf->len) {
                        buf->len = size;
                        i->last_offset = last_offset(buf);
                        break;
                }
                size -= buf->len;
                i->head++;
                off = 0;
        }
        pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        const struct bio_vec *bvec, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset;

        for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
                if (likely(size < bvec->bv_len))
                        break;
                size -= bvec->bv_len;
        }
        i->iov_offset = size;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_pipe(i)) {
                pipe_advance(i, size);
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int head = pipe->head;

                while (head > i->start_head) {
                        struct pipe_buffer *b = pipe_buf(pipe, --head);
                        if (unroll < b->len) {
                                b->len -= unroll;
                                i->last_offset = last_offset(b);
                                i->head = head;
                                return;
                        }
                        unroll -= b->len;
                        pipe_buf_release(pipe, b);
                        pipe->head--;
                }
                i->last_offset = 0;
                i->head = head;
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
                BUG(); /* We should never go beyond the start of the specified
                        * range since we might then be straying into pages that
                        * aren't pinned.
                        */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
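
/*
 * Usage sketch (illustrative; do_transfer() is hypothetical): advance and
 * revert are commonly paired to undo a partially consumed iterator when
 * an operation must be retried:
 *
 *	size_t before = iov_iter_count(iter);
 *	ret = do_transfer(iter);	// advances iter as it goes
 *	if (ret < 0)
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 */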

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                        return min(i->count, i->iov->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
        return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
                .data_source = direction,
                .kvec = kvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
                .data_source = direction,
                .bvec = bvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_bvec);
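
/*
 * Example (a minimal sketch; 'buf' and 'len' are hypothetical): passing a
 * kernel buffer to code that expects an iov_iter:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, &kv, 1, len);
 *	// WRITE: the kvec is the data source.  Page-based buffers work
 *	// the same way via a bio_vec array and iov_iter_bvec().
 */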

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        *i = (struct iov_iter){
                .iter_type = ITER_PIPE,
                .data_source = false,
                .pipe = pipe,
                .head = pipe->head,
                .start_head = pipe->head,
                .last_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller *must* prevent the
 * pages from being evicted, either by taking a ref on them or by locking
 * them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        BUG_ON(direction & ~1);
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
                .data_source = direction,
                .xarray = xarray,
                .xarray_start = start,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_xarray);
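
/*
 * Example (a minimal sketch; 'mapping', 'pos' and 'len' are hypothetical):
 * iterating over an inode's page cache, with the pages in the range
 * already ref'd or locked by the caller:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *	// Pages are looked up lazily as the iterator walks; this call
 *	// itself pins nothing.
 */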

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
                .data_source = false,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_discard);
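
/*
 * Example (a minimal sketch; the read helper is hypothetical): draining
 * 'len' bytes from a source without storing them:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, len);
 *	ret = my_read_into_iter(src, &iter);	// data is simply dropped
 */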

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
                                   unsigned len_mask)
{
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->iov[k].iov_len - skip;

                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
                if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
                        return false;

                size -= len;
                if (!size)
                        break;
        }
        return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
                                  unsigned len_mask)
{
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;

                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
                if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
                        return false;

                size -= len;
                if (!size)
                        break;
        }
        return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *      are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
                         unsigned len_mask)
{
        if (likely(iter_is_ubuf(i))) {
                if (i->count & len_mask)
                        return false;
                if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
                        return false;
                return true;
        }

        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_aligned_iovec(i, addr_mask, len_mask);

        if (iov_iter_is_bvec(i))
                return iov_iter_aligned_bvec(i, addr_mask, len_mask);

        if (iov_iter_is_pipe(i)) {
                size_t size = i->count;

                if (size & len_mask)
                        return false;
                if (size && i->last_offset > 0) {
                        if (i->last_offset & addr_mask)
                                return false;
                }

                return true;
        }

        if (iov_iter_is_xarray(i)) {
                if (i->count & len_mask)
                        return false;
                if ((i->xarray_start + i->iov_offset) & addr_mask)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
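
/*
 * Usage sketch (illustrative): a block driver gating direct I/O on
 * 512-byte alignment of both addresses and lengths:
 *
 *	if (!iov_iter_is_aligned(iter, 511, 511))
 *		return -EINVAL;	// or fall back to buffered I/O
 */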
1185
1186 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1187 {
1188         unsigned long res = 0;
1189         size_t size = i->count;
1190         size_t skip = i->iov_offset;
1191         unsigned k;
1192
1193         for (k = 0; k < i->nr_segs; k++, skip = 0) {
1194                 size_t len = i->iov[k].iov_len - skip;
1195                 if (len) {
1196                         res |= (unsigned long)i->iov[k].iov_base + skip;
1197                         if (len > size)
1198                                 len = size;
1199                         res |= len;
1200                         size -= len;
1201                         if (!size)
1202                                 break;
1203                 }
1204         }
1205         return res;
1206 }
1207
1208 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1209 {
1210         unsigned res = 0;
1211         size_t size = i->count;
1212         unsigned skip = i->iov_offset;
1213         unsigned k;
1214
1215         for (k = 0; k < i->nr_segs; k++, skip = 0) {
1216                 size_t len = i->bvec[k].bv_len - skip;
1217                 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1218                 if (len > size)
1219                         len = size;
1220                 res |= len;
1221                 size -= len;
1222                 if (!size)
1223                         break;
1224         }
1225         return res;
1226 }
1227
1228 unsigned long iov_iter_alignment(const struct iov_iter *i)
1229 {
1230         if (likely(iter_is_ubuf(i))) {
1231                 size_t size = i->count;
1232                 if (size)
1233                         return ((unsigned long)i->ubuf + i->iov_offset) | size;
1234                 return 0;
1235         }
1236
1237         /* iovec and kvec have identical layouts */
1238         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1239                 return iov_iter_alignment_iovec(i);
1240
1241         if (iov_iter_is_bvec(i))
1242                 return iov_iter_alignment_bvec(i);
1243
1244         if (iov_iter_is_pipe(i)) {
1245                 size_t size = i->count;
1246
1247                 if (size && i->last_offset > 0)
1248                         return size | i->last_offset;
1249                 return size;
1250         }
1251
1252         if (iov_iter_is_xarray(i))
1253                 return (i->xarray_start + i->iov_offset) | i->count;
1254
1255         return 0;
1256 }
1257 EXPORT_SYMBOL(iov_iter_alignment);
1258
1259 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1260 {
1261         unsigned long res = 0;
1262         unsigned long v = 0;
1263         size_t size = i->count;
1264         unsigned k;
1265
1266         if (iter_is_ubuf(i))
1267                 return 0;
1268
1269         if (WARN_ON(!iter_is_iovec(i)))
1270                 return ~0U;
1271
1272         for (k = 0; k < i->nr_segs; k++) {
1273                 if (i->iov[k].iov_len) {
1274                         unsigned long base = (unsigned long)i->iov[k].iov_base;
1275                         if (v) // if not the first one
1276                                 res |= base | v; // this start | previous end
1277                         v = base + i->iov[k].iov_len;
1278                         if (size <= i->iov[k].iov_len)
1279                                 break;
1280                         size -= i->iov[k].iov_len;
1281                 }
1282         }
1283         return res;
1284 }
1285 EXPORT_SYMBOL(iov_iter_gap_alignment);
1286
1287 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1288                                 size_t maxsize,
1289                                 struct page **pages,
1290                                 size_t off)
1291 {
1292         struct pipe_inode_info *pipe = i->pipe;
1293         ssize_t left = maxsize;
1294
1295         if (off) {
1296                 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head - 1);
1297
1298                 get_page(*pages++ = buf->page);
1299                 left -= PAGE_SIZE - off;
1300                 if (left <= 0) {
1301                         buf->len += maxsize;
1302                         return maxsize;
1303                 }
1304                 buf->len = PAGE_SIZE;
1305         }
1306         while (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
1307                 struct page *page = push_anon(pipe,
1308                                               min_t(ssize_t, left, PAGE_SIZE));
1309                 if (!page)
1310                         break;
1311                 get_page(*pages++ = page);
1312                 left -= PAGE_SIZE;
1313                 if (left <= 0)
1314                         return maxsize;
1315         }
1316         return maxsize - left ? : -EFAULT;
1317 }
1318
1319 static ssize_t pipe_get_pages(struct iov_iter *i,
1320                    struct page **pages, size_t maxsize, unsigned maxpages,
1321                    size_t *start)
1322 {
1323         unsigned int npages, off;
1324         size_t capacity;
1325
1326         if (!sanity(i))
1327                 return -EFAULT;
1328
1329         *start = off = pipe_npages(i, &npages);
1330         capacity = min(npages, maxpages) * PAGE_SIZE - off;
1331
1332         return __pipe_get_pages(i, min(maxsize, capacity), pages, off);
1333 }
1334
1335 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1336                                           pgoff_t index, unsigned int nr_pages)
1337 {
1338         XA_STATE(xas, xa, index);
1339         struct page *page;
1340         unsigned int ret = 0;
1341
1342         rcu_read_lock();
1343         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1344                 if (xas_retry(&xas, page))
1345                         continue;
1346
1347                 /* Has the page moved or been split? */
1348                 if (unlikely(page != xas_reload(&xas))) {
1349                         xas_reset(&xas);
1350                         continue;
1351                 }
1352
1353                 pages[ret] = find_subpage(page, xas.xa_index);
1354                 get_page(pages[ret]);
1355                 if (++ret == nr_pages)
1356                         break;
1357         }
1358         rcu_read_unlock();
1359         return ret;
1360 }
1361
1362 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1363                                      struct page **pages, size_t maxsize,
1364                                      unsigned maxpages, size_t *_start_offset)
1365 {
1366         unsigned nr, offset;
1367         pgoff_t index, count;
1368         size_t size = maxsize;
1369         loff_t pos;
1370
1371         pos = i->xarray_start + i->iov_offset;
1372         index = pos >> PAGE_SHIFT;
1373         offset = pos & ~PAGE_MASK;
1374         *_start_offset = offset;
1375
1376         count = 1;
1377         if (size > PAGE_SIZE - offset) {
1378                 size -= PAGE_SIZE - offset;
1379                 count += size >> PAGE_SHIFT;
1380                 size &= ~PAGE_MASK;
1381                 if (size)
1382                         count++;
1383         }
1384
1385         if (count > maxpages)
1386                 count = maxpages;
1387
1388         nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1389         if (nr == 0)
1390                 return 0;
1391
1392         return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1393 }
1394
1395 /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
1396 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1397 {
1398         size_t skip;
1399         long k;
1400
1401         if (iter_is_ubuf(i))
1402                 return (unsigned long)i->ubuf + i->iov_offset;
1403
1404         for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1405                 size_t len = i->iov[k].iov_len - skip;
1406
1407                 if (unlikely(!len))
1408                         continue;
1409                 if (*size > len)
1410                         *size = len;
1411                 return (unsigned long)i->iov[k].iov_base + skip;
1412         }
1413         BUG(); // if it had been empty, we wouldn't get called
1414 }
1415
1416 /* must be done on non-empty ITER_BVEC one */
1417 static struct page *first_bvec_segment(const struct iov_iter *i,
1418                                        size_t *size, size_t *start)
1419 {
1420         struct page *page;
1421         size_t skip = i->iov_offset, len;
1422
1423         len = i->bvec->bv_len - skip;
1424         if (*size > len)
1425                 *size = len;
1426         skip += i->bvec->bv_offset;
1427         page = i->bvec->bv_page + skip / PAGE_SIZE;
1428         *start = skip % PAGE_SIZE;
1429         return page;
1430 }
1431
1432 ssize_t iov_iter_get_pages(struct iov_iter *i,
1433                    struct page **pages, size_t maxsize, unsigned maxpages,
1434                    size_t *start)
1435 {
1436         int n, res;
1437
1438         if (maxsize > i->count)
1439                 maxsize = i->count;
1440         if (!maxsize || !maxpages)
1441                 return 0;
1442         if (maxsize > MAX_RW_COUNT)
1443                 maxsize = MAX_RW_COUNT;
1444         BUG_ON(!pages);
1445
1446         if (likely(user_backed_iter(i))) {
1447                 unsigned int gup_flags = 0;
1448                 unsigned long addr;
1449
1450                 if (iov_iter_rw(i) != WRITE)
1451                         gup_flags |= FOLL_WRITE;
1452                 if (i->nofault)
1453                         gup_flags |= FOLL_NOFAULT;
1454
1455                 addr = first_iovec_segment(i, &maxsize);
1456                 *start = addr % PAGE_SIZE;
1457                 addr &= PAGE_MASK;
1458                 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1459                 if (n > maxpages)
1460                         n = maxpages;
1461                 res = get_user_pages_fast(addr, n, gup_flags, pages);
1462                 if (unlikely(res <= 0))
1463                         return res;
1464                 return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1465         }
1466         if (iov_iter_is_bvec(i)) {
1467                 struct page *page;
1468
1469                 page = first_bvec_segment(i, &maxsize, start);
1470                 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1471                 if (n > maxpages)
1472                         n = maxpages;
1473                 for (int k = 0; k < n; k++)
1474                         get_page(*pages++ = page++);
1475                 return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1476         }
1477         if (iov_iter_is_pipe(i))
1478                 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1479         if (iov_iter_is_xarray(i))
1480                 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1481         return -EFAULT;
1482 }
1483 EXPORT_SYMBOL(iov_iter_get_pages);
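
/*
 * Editor's sketch (hypothetical caller, illustration only): pin up to 16
 * pages covered by the iterator, e.g. to build a direct-I/O request.
 * Note that iov_iter_get_pages() does not advance the iterator; the
 * caller does that once the returned bytes have been consumed.
 */
static ssize_t __maybe_unused example_pin_pages(struct iov_iter *i)
{
	struct page *pages[16];
	size_t start;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &start);
	if (bytes <= 0)
		return bytes;

	/* the data occupies [start, start + bytes) across pages[] */
	npages = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
	/* ... submit I/O here ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	iov_iter_advance(i, bytes);
	return bytes;
}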
1484
1485 static struct page **get_pages_array(size_t n)
1486 {
1487         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1488 }
1489
1490 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1491                    struct page ***pages, size_t maxsize,
1492                    size_t *start)
1493 {
1494         struct page **p;
1495         unsigned int npages, off;
1496         ssize_t n;
1497
1498         if (!sanity(i))
1499                 return -EFAULT;
1500
1501         *start = off = pipe_npages(i, &npages);
1502         n = npages * PAGE_SIZE - off;
1503         if (maxsize > n)
1504                 maxsize = n;
1505         else
1506                 npages = DIV_ROUND_UP(maxsize + off, PAGE_SIZE);
1507         *pages = p = get_pages_array(npages);
1508         if (!p)
1509                 return -ENOMEM;
1510         return __pipe_get_pages(i, maxsize, p, off);
1511 }
1512
1513 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1514                                            struct page ***pages, size_t maxsize,
1515                                            size_t *_start_offset)
1516 {
1517         struct page **p;
1518         unsigned nr, offset;
1519         pgoff_t index, count;
1520         size_t size = maxsize;
1521         loff_t pos;
1522
1523         pos = i->xarray_start + i->iov_offset;
1524         index = pos >> PAGE_SHIFT;
1525         offset = pos & ~PAGE_MASK;
1526         *_start_offset = offset;
1527
1528         count = 1;
1529         if (size > PAGE_SIZE - offset) {
1530                 size -= PAGE_SIZE - offset;
1531                 count += size >> PAGE_SHIFT;
1532                 size &= ~PAGE_MASK;
1533                 if (size)
1534                         count++;
1535         }
1536
1537         *pages = p = get_pages_array(count);
1538         if (!p)
1539                 return -ENOMEM;
1540
1541         nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1542         if (nr == 0)
1543                 return 0;
1544
1545         return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1546 }
1547
1548 static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1549                    struct page ***pages, size_t maxsize,
1550                    size_t *start)
1551 {
1552         struct page **p;
1553         int n, res;
1554
1555         if (maxsize > i->count)
1556                 maxsize = i->count;
1557         if (!maxsize)
1558                 return 0;
1559         if (maxsize > MAX_RW_COUNT)
1560                 maxsize = MAX_RW_COUNT;
1561
1562         if (likely(user_backed_iter(i))) {
1563                 unsigned int gup_flags = 0;
1564                 unsigned long addr;
1565
1566                 if (iov_iter_rw(i) != WRITE)
1567                         gup_flags |= FOLL_WRITE;
1568                 if (i->nofault)
1569                         gup_flags |= FOLL_NOFAULT;
1570
1571                 addr = first_iovec_segment(i, &maxsize);
1572                 *start = addr % PAGE_SIZE;
1573                 addr &= PAGE_MASK;
1574                 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1575                 *pages = p = get_pages_array(n);
1576                 if (!p)
1577                         return -ENOMEM;
1578                 res = get_user_pages_fast(addr, n, gup_flags, p);
1579                 if (unlikely(res <= 0))
1580                         return res;
1581                 return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1582         }
1583         if (iov_iter_is_bvec(i)) {
1584                 struct page *page;
1585
1586                 page = first_bvec_segment(i, &maxsize, start);
1587                 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1588                 *pages = p = get_pages_array(n);
1589                 if (!p)
1590                         return -ENOMEM;
1591                 for (int k = 0; k < n; k++)
1592                         get_page(*p++ = page++);
1593                 return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1594         }
1595         if (iov_iter_is_pipe(i))
1596                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1597         if (iov_iter_is_xarray(i))
1598                 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1599         return -EFAULT;
1600 }
1601
1602 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1603                    struct page ***pages, size_t maxsize,
1604                    size_t *start)
1605 {
1606         ssize_t len;
1607
1608         *pages = NULL;
1609
1610         len = __iov_iter_get_pages_alloc(i, pages, maxsize, start);
1611         if (len <= 0) {
1612                 kvfree(*pages);
1613                 *pages = NULL;
1614         }
1615         return len;
1616 }
1617 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
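
/*
 * Editor's sketch (hypothetical caller, illustration only): the _alloc
 * variant sizes and allocates the page array itself.  On success the
 * caller owns the array and must kvfree() it once the page references
 * have been dropped; on failure the wrapper has already freed it.
 */
static ssize_t __maybe_unused example_pin_pages_alloc(struct iov_iter *i,
						      size_t maxsize)
{
	struct page **pages;
	size_t start;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages_alloc(i, &pages, maxsize, &start);
	if (bytes <= 0)
		return bytes;		/* pages is NULL here */

	npages = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
	/* ... use the pinned pages; data starts at offset start ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	kvfree(pages);
	iov_iter_advance(i, bytes);
	return bytes;
}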
1618
1619 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1620                                struct iov_iter *i)
1621 {
1622         __wsum sum, next;
1623         sum = *csum;
1624         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1625                 WARN_ON(1);
1626                 return 0;
1627         }
1628         iterate_and_advance(i, bytes, base, len, off, ({
1629                 next = csum_and_copy_from_user(base, addr + off, len);
1630                 sum = csum_block_add(sum, next, off);
1631                 next ? 0 : len;
1632         }), ({
1633                 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1634         })
1635         )
1636         *csum = sum;
1637         return bytes;
1638 }
1639 EXPORT_SYMBOL(csum_and_copy_from_iter);
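
/*
 * Editor's note (sketch, names illustrative): a typical caller folds the
 * copy and the Internet checksum into a single pass over the data, e.g.
 * on a datagram receive path:
 *
 *	__wsum csum = 0;
 *	size_t n = csum_and_copy_from_iter(kaddr, len, &csum, &msg->msg_iter);
 *	if (n != len)
 *		...	(short copy: a fault occurred in userspace)
 */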
1640
1641 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1642                              struct iov_iter *i)
1643 {
1644         struct csum_state *csstate = _csstate;
1645         __wsum sum, next;
1646
1647         if (unlikely(iov_iter_is_discard(i))) {
1648                 WARN_ON(1);     /* for now */
1649                 return 0;
1650         }
1651
1652         sum = csum_shift(csstate->csum, csstate->off);
1653         if (unlikely(iov_iter_is_pipe(i)))
1654                 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1655         else iterate_and_advance(i, bytes, base, len, off, ({
1656                 next = csum_and_copy_to_user(addr + off, base, len);
1657                 sum = csum_block_add(sum, next, off);
1658                 next ? 0 : len;
1659         }), ({
1660                 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1661         })
1662         )
1663         csstate->csum = csum_shift(sum, csstate->off);
1664         csstate->off += bytes;
1665         return bytes;
1666 }
1667 EXPORT_SYMBOL(csum_and_copy_to_iter);
1668
1669 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1670                 struct iov_iter *i)
1671 {
1672 #ifdef CONFIG_CRYPTO_HASH
1673         struct ahash_request *hash = hashp;
1674         struct scatterlist sg;
1675         size_t copied;
1676
1677         copied = copy_to_iter(addr, bytes, i);
1678         sg_init_one(&sg, addr, copied);
1679         ahash_request_set_crypt(hash, &sg, NULL, copied);
1680         crypto_ahash_update(hash);
1681         return copied;
1682 #else
1683         return 0;
1684 #endif
1685 }
1686 EXPORT_SYMBOL(hash_and_copy_to_iter);
1687
1688 static int iov_npages(const struct iov_iter *i, int maxpages)
1689 {
1690         size_t skip = i->iov_offset, size = i->count;
1691         const struct iovec *p;
1692         int npages = 0;
1693
1694         for (p = i->iov; size; skip = 0, p++) {
1695                 unsigned offs = offset_in_page(p->iov_base + skip);
1696                 size_t len = min(p->iov_len - skip, size);
1697
1698                 if (len) {
1699                         size -= len;
1700                         npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1701                         if (unlikely(npages > maxpages))
1702                                 return maxpages;
1703                 }
1704         }
1705         return npages;
1706 }
1707
1708 static int bvec_npages(const struct iov_iter *i, int maxpages)
1709 {
1710         size_t skip = i->iov_offset, size = i->count;
1711         const struct bio_vec *p;
1712         int npages = 0;
1713
1714         for (p = i->bvec; size; skip = 0, p++) {
1715                 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1716                 size_t len = min(p->bv_len - skip, size);
1717
1718                 size -= len;
1719                 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1720                 if (unlikely(npages > maxpages))
1721                         return maxpages;
1722         }
1723         return npages;
1724 }
1725
1726 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1727 {
1728         if (unlikely(!i->count))
1729                 return 0;
1730         if (likely(iter_is_ubuf(i))) {
1731                 unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1732                 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1733                 return min(npages, maxpages);
1734         }
1735         /* iovec and kvec have identical layouts */
1736         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1737                 return iov_npages(i, maxpages);
1738         if (iov_iter_is_bvec(i))
1739                 return bvec_npages(i, maxpages);
1740         if (iov_iter_is_pipe(i)) {
1741                 int npages;
1742
1743                 if (!sanity(i))
1744                         return 0;
1745
1746                 pipe_npages(i, &npages);
1747                 return min(npages, maxpages);
1748         }
1749         if (iov_iter_is_xarray(i)) {
1750                 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1751                 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1752                 return min(npages, maxpages);
1753         }
1754         return 0;
1755 }
1756 EXPORT_SYMBOL(iov_iter_npages);
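
/*
 * Editor's note (sketch, arguments elided): callers typically use this to
 * size a page vector before pinning, e.g. for direct I/O:
 *
 *	int nr = iov_iter_npages(iter, BIO_MAX_VECS);
 *	bio = bio_alloc(..., nr, ...);
 */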
1757
1758 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1759 {
1760         *new = *old;
1761         if (unlikely(iov_iter_is_pipe(new))) {
1762                 WARN_ON(1);
1763                 return NULL;
1764         }
1765         if (iov_iter_is_bvec(new))
1766                 return new->bvec = kmemdup(new->bvec,
1767                                     new->nr_segs * sizeof(struct bio_vec),
1768                                     flags);
1769         else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
1770                 /* iovec and kvec have identical layout */
1771                 return new->iov = kmemdup(new->iov,
1772                                    new->nr_segs * sizeof(struct iovec),
1773                                    flags);
1774         return NULL;
1775 }
1776 EXPORT_SYMBOL(dup_iter);
1777
1778 static int copy_compat_iovec_from_user(struct iovec *iov,
1779                 const struct iovec __user *uvec, unsigned long nr_segs)
1780 {
1781         const struct compat_iovec __user *uiov =
1782                 (const struct compat_iovec __user *)uvec;
1783         int ret = -EFAULT, i;
1784
1785         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1786                 return -EFAULT;
1787
1788         for (i = 0; i < nr_segs; i++) {
1789                 compat_uptr_t buf;
1790                 compat_ssize_t len;
1791
1792                 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1793                 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1794
1795                 /* check for compat_size_t not fitting in compat_ssize_t .. */
1796                 if (len < 0) {
1797                         ret = -EINVAL;
1798                         goto uaccess_end;
1799                 }
1800                 iov[i].iov_base = compat_ptr(buf);
1801                 iov[i].iov_len = len;
1802         }
1803
1804         ret = 0;
1805 uaccess_end:
1806         user_access_end();
1807         return ret;
1808 }
1809
1810 static int copy_iovec_from_user(struct iovec *iov,
1811                 const struct iovec __user *uvec, unsigned long nr_segs)
1812 {
1813         unsigned long seg;
1814
1815         if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1816                 return -EFAULT;
1817         for (seg = 0; seg < nr_segs; seg++) {
1818                 if ((ssize_t)iov[seg].iov_len < 0)
1819                         return -EINVAL;
1820         }
1821
1822         return 0;
1823 }
1824
1825 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1826                 unsigned long nr_segs, unsigned long fast_segs,
1827                 struct iovec *fast_iov, bool compat)
1828 {
1829         struct iovec *iov = fast_iov;
1830         int ret;
1831
1832         /*
1833          * SuS says "The readv() function *may* fail if the iovcnt argument was
1834          * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1835          * traditionally returned zero for zero segments, so...
1836          */
1837         if (nr_segs == 0)
1838                 return iov;
1839         if (nr_segs > UIO_MAXIOV)
1840                 return ERR_PTR(-EINVAL);
1841         if (nr_segs > fast_segs) {
1842                 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1843                 if (!iov)
1844                         return ERR_PTR(-ENOMEM);
1845         }
1846
1847         if (compat)
1848                 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1849         else
1850                 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1851         if (ret) {
1852                 if (iov != fast_iov)
1853                         kfree(iov);
1854                 return ERR_PTR(ret);
1855         }
1856
1857         return iov;
1858 }
1859
1860 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1861                  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1862                  struct iov_iter *i, bool compat)
1863 {
1864         ssize_t total_len = 0;
1865         unsigned long seg;
1866         struct iovec *iov;
1867
1868         iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1869         if (IS_ERR(iov)) {
1870                 *iovp = NULL;
1871                 return PTR_ERR(iov);
1872         }
1873
1874         /*
1875          * According to the Single Unix Specification we should return EINVAL if
1876          * an element length is < 0 when cast to ssize_t or if the total length
1877          * would overflow the ssize_t return value of the system call.
1878          *
1879          * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1880          * overflow case.
1881          */
1882         for (seg = 0; seg < nr_segs; seg++) {
1883                 ssize_t len = (ssize_t)iov[seg].iov_len;
1884
1885                 if (!access_ok(iov[seg].iov_base, len)) {
1886                         if (iov != *iovp)
1887                                 kfree(iov);
1888                         *iovp = NULL;
1889                         return -EFAULT;
1890                 }
1891
1892                 if (len > MAX_RW_COUNT - total_len) {
1893                         len = MAX_RW_COUNT - total_len;
1894                         iov[seg].iov_len = len;
1895                 }
1896                 total_len += len;
1897         }
1898
1899         iov_iter_init(i, type, iov, nr_segs, total_len);
1900         if (iov == *iovp)
1901                 *iovp = NULL;
1902         else
1903                 *iovp = iov;
1904         return total_len;
1905 }
1906
1907 /**
1908  * import_iovec() - Copy an array of &struct iovec from userspace
1909  *     into the kernel, check that it is valid, and initialize a new
1910  *     &struct iov_iter iterator to access it.
1911  *
1912  * @type: One of %READ or %WRITE.
1913  * @uvec: Pointer to the userspace array.
1914  * @nr_segs: Number of elements in userspace array.
1915  * @fast_segs: Number of elements in the array pointed to by @iovp.
1916  * @iovp: (input and output parameter) Pointer to pointer to (usually small
1917  *     on-stack) kernel array.
1918  * @i: Pointer to iterator that will be initialized on success.
1919  *
1920  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1921  * then this function places %NULL in *@iovp on return. Otherwise, a new
1922  * array will be allocated and the result placed in *@iovp. This means that
1923  * the caller may call kfree() on *@iovp regardless of whether the small
1924  * on-stack array was used or not (and regardless of whether this function
1925  * returns an error or not).
1926  *
1927  * Return: Negative error code on error, bytes imported on success
1928  */
1929 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1930                  unsigned nr_segs, unsigned fast_segs,
1931                  struct iovec **iovp, struct iov_iter *i)
1932 {
1933         return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1934                               in_compat_syscall());
1935 }
1936 EXPORT_SYMBOL(import_iovec);
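
/*
 * Editor's sketch (hypothetical caller, illustration only): the usual
 * import pattern for a readv-style syscall, with a small on-stack array
 * covering the common case.
 */
static ssize_t __maybe_unused example_import(const struct iovec __user *uvec,
					     unsigned long vlen)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... do up to ret bytes of I/O driven by &iter ... */
	kfree(iov);	/* NULL if the on-stack array sufficed; kfree(NULL) is a no-op */
	return ret;
}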
1937
1938 int import_single_range(int rw, void __user *buf, size_t len,
1939                  struct iovec *iov, struct iov_iter *i)
1940 {
1941         if (len > MAX_RW_COUNT)
1942                 len = MAX_RW_COUNT;
1943         if (unlikely(!access_ok(buf, len)))
1944                 return -EFAULT;
1945
1946         iov->iov_base = buf;
1947         iov->iov_len = len;
1948         iov_iter_init(i, rw, iov, 1, len);
1949         return 0;
1950 }
1951 EXPORT_SYMBOL(import_single_range);
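
/*
 * Editor's note (sketch): single-buffer callers (read()/write() rather
 * than readv()/writev()) pair this with an on-stack iovec:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(READ, buf, len, &iov, &iter);
 *
 * The iovec must stay in scope for as long as the iterator is used.
 */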
1952
1953 /**
1954  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1955  *     iov_iter_save_state() was called.
1956  *
1957  * @i: &struct iov_iter to restore
1958  * @state: state to restore from
1959  *
1960  * Used after iov_iter_save_state() to restore @i, if operations may
1961  * have advanced it.
1962  *
1963  * Note: only works on ITER_UBUF, ITER_IOVEC, ITER_BVEC, and ITER_KVEC
1964  */
1965 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1966 {
1967         if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1968                          !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
1969                 return;
1970         i->iov_offset = state->iov_offset;
1971         i->count = state->count;
1972         if (iter_is_ubuf(i))
1973                 return;
1974         /*
1975          * For the *vec iters, nr_segs + iov is constant - if we increment
1976          * the vec, then we also decrement the nr_segs count. Hence we don't
1977          * need to track both of these, just one is enough and we can deduce
1978          * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
1979          * size, so we can just increment the iov pointer as they are unionized.
1980          * ITER_BVEC _may_ be the same size on some archs, but on others it is
1981          * not. Be safe and handle it separately.
1982          */
1983         BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
1984         if (iov_iter_is_bvec(i))
1985                 i->bvec -= state->nr_segs - i->nr_segs;
1986         else
1987                 i->iov -= state->nr_segs - i->nr_segs;
1988         i->nr_segs = state->nr_segs;
1989 }
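
/*
 * Editor's sketch (hypothetical caller, illustration only): the intended
 * save/restore pattern: snapshot the iterator before an operation that
 * may advance it, and roll back if the operation must be retried.
 */
static ssize_t __maybe_unused example_retry(struct iov_iter *i)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(i, &state);
	ret = -EAGAIN;	/* stand-in for an operation that advanced *i */
	if (ret == -EAGAIN)
		iov_iter_restore(i, &state);
	return ret;
}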