Merge branch 'for-6.5/core' into for-linus
[platform/kernel/linux-rpi.git] / lib / iov_iter.c
// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {          \
        size_t __maybe_unused off = 0;                          \
        len = n;                                                \
        base = __p + i->iov_offset;                             \
        len -= (STEP);                                          \
        i->iov_offset += len;                                   \
        n = len;                                                \
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {        \
        size_t off = 0;                                         \
        size_t skip = i->iov_offset;                            \
        do {                                                    \
                len = min(n, __p->iov_len - skip);              \
                if (likely(len)) {                              \
                        base = __p->iov_base + skip;            \
                        len -= (STEP);                          \
                        off += len;                             \
                        skip += len;                            \
                        n -= len;                               \
                        if (skip < __p->iov_len)                \
                                break;                          \
                }                                               \
                __p++;                                          \
                skip = 0;                                       \
        } while (n);                                            \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {           \
        size_t off = 0;                                         \
        unsigned skip = i->iov_offset;                          \
        while (n) {                                             \
                unsigned offset = p->bv_offset + skip;          \
                unsigned left;                                  \
                void *kaddr = kmap_local_page(p->bv_page +      \
                                        offset / PAGE_SIZE);    \
                base = kaddr + offset % PAGE_SIZE;              \
                len = min(min(n, (size_t)(p->bv_len - skip)),   \
                     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
                left = (STEP);                                  \
                kunmap_local(kaddr);                            \
                len -= left;                                    \
                off += len;                                     \
                skip += len;                                    \
                if (skip == p->bv_len) {                        \
                        skip = 0;                               \
                        p++;                                    \
                }                                               \
                n -= len;                                       \
                if (left)                                       \
                        break;                                  \
        }                                                       \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_xarray(i, n, base, len, __off, STEP) {          \
        __label__ __out;                                        \
        size_t __off = 0;                                       \
        struct folio *folio;                                    \
        loff_t start = i->xarray_start + i->iov_offset;         \
        pgoff_t index = start / PAGE_SIZE;                      \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        len = PAGE_SIZE - offset_in_page(start);                \
        rcu_read_lock();                                        \
        xas_for_each(&xas, folio, ULONG_MAX) {                  \
                unsigned left;                                  \
                size_t offset;                                  \
                if (xas_retry(&xas, folio))                     \
                        continue;                               \
                if (WARN_ON(xa_is_value(folio)))                \
                        break;                                  \
                if (WARN_ON(folio_test_hugetlb(folio)))         \
                        break;                                  \
                offset = offset_in_folio(folio, start + __off); \
                while (offset < folio_size(folio)) {            \
                        base = kmap_local_folio(folio, offset); \
                        len = min(n, len);                      \
                        left = (STEP);                          \
                        kunmap_local(base);                     \
                        len -= left;                            \
                        __off += len;                           \
                        n -= len;                               \
                        if (left || n == 0)                     \
                                goto __out;                     \
                        offset += len;                          \
                        len = PAGE_SIZE;                        \
                }                                               \
        }                                                       \
__out:                                                          \
        rcu_read_unlock();                                      \
        i->iov_offset += __off;                                 \
        n = __off;                                              \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {     \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (likely(n)) {                                        \
                if (likely(iter_is_ubuf(i))) {                  \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_buf(i, n, base, len, off,       \
                                                i->ubuf, (I))   \
                } else if (likely(iter_is_iovec(i))) {          \
                        const struct iovec *iov = iter_iov(i);  \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
                        i->nr_segs -= iov - iter_iov(i);        \
                        i->__iov = iov;                         \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
                        size_t len;                             \
                        iterate_bvec(i, n, base, len, off,      \
                                                bvec, (K))      \
                        i->nr_segs -= bvec - i->bvec;           \
                        i->bvec = bvec;                         \
                } else if (iov_iter_is_kvec(i)) {               \
                        const struct kvec *kvec = i->kvec;      \
                        void *base;                             \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                kvec, (K))      \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (iov_iter_is_xarray(i)) {             \
                        void *base;                             \
                        size_t len;                             \
                        iterate_xarray(i, n, base, len, off,    \
                                                        (K))    \
                }                                               \
                i->count -= n;                                  \
        }                                                       \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
        __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

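/*
 * Convention shared by the iterate_*() macros above: STEP is handed a
 * (base, len) window and must evaluate to the number of bytes it did
 * *not* process.  Zero means the window was fully consumed; anything
 * else ends the walk early, with the iterator advanced only by what was
 * actually done.  Note that iterate_and_advance() wraps the kernel-space
 * step as ((void)(K),0), i.e. kernel-side copies are assumed to never
 * fail; only __iterate_and_advance() users (e.g. _copy_mc_to_iter())
 * can observe short kernel-side copies.  copyout() and copyin() below
 * return the number of bytes *not* copied, matching the
 * copy_to_user()/copy_from_user() convention.
 */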
static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyout_nofault(void __user *to, const void *from, size_t n)
{
        long res;

        if (should_fail_usercopy())
                return n;

        res = copy_to_user_nofault(to, from, n);

        return res < 0 ? n : res;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        size_t res = n;

        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user_before(to, from, n);
                res = raw_copy_from_user(to, from, n);
                instrument_copy_from_user_after(to, from, n, res);
        }
        return res;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->last_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = pipe_buf(pipe, i_head);
                if (unlikely(p->offset + p->len != abs(i->last_offset)))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
        struct page *page = alloc_page(GFP_USER);
        if (page) {
                struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
                *buf = (struct pipe_buffer) {
                        .ops = &default_pipe_buf_ops,
                        .page = page,
                        .offset = 0,
                        .len = size
                };
        }
        return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
                        unsigned int offset, unsigned int size)
{
        struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
        *buf = (struct pipe_buffer) {
                .ops = &page_cache_pipe_buf_ops,
                .page = page,
                .offset = offset,
                .len = size
        };
        get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
        if (buf->ops == &default_pipe_buf_ops)
                return buf->len;        // buf->offset is 0 for those
        else
                return -(buf->offset + buf->len);
}

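/*
 * The sign of i->last_offset (see last_offset() above) encodes what kind
 * of buffer sits at the head of the pipe: a positive value is the fill
 * level of an anonymous page pushed by push_anon(), which append_pipe()
 * may keep filling; a negative value is -(offset + len) of a zero-copy
 * page-cache page pushed by push_page(), which only
 * copy_page_to_iter_pipe() may extend, and only with the same page at
 * the matching offset; zero means there is nothing to append to.
 */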
static struct page *append_pipe(struct iov_iter *i, size_t size,
                                unsigned int *off)
{
        struct pipe_inode_info *pipe = i->pipe;
        int offset = i->last_offset;
        struct pipe_buffer *buf;
        struct page *page;

        if (offset > 0 && offset < PAGE_SIZE) {
                // some space in the last buffer; add to it
                buf = pipe_buf(pipe, pipe->head - 1);
                size = min_t(size_t, size, PAGE_SIZE - offset);
                buf->len += size;
                i->last_offset += size;
                i->count -= size;
                *off = offset;
                return buf->page;
        }
        // OK, we need a new buffer
        *off = 0;
        size = min_t(size_t, size, PAGE_SIZE);
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return NULL;
        page = push_anon(pipe, size);
        if (!page)
                return NULL;
        i->head = pipe->head - 1;
        i->last_offset = size;
        i->count -= size;
        return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int head = pipe->head;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        if (offset && i->last_offset == -offset) { // could we merge it?
                struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
                if (buf->page == page) {
                        buf->len += bytes;
                        i->last_offset -= bytes;
                        i->count -= bytes;
                        return bytes;
                }
        }
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return 0;

        push_page(pipe, page, offset, bytes);
        i->last_offset = -(offset + bytes);
        i->head = head;
        i->count -= bytes;
        return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_readable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_readable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

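/*
 * A minimal sketch of the usual retry pattern around the helper above,
 * modelled on generic_perform_write() (illustrative, not a verbatim
 * caller; ->write_begin()/->write_end() details are elided):
 *
 *	do {
 *		if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *			return -EFAULT;	// nothing could be faulted in
 *		copied = copy_page_from_iter_atomic(page, off, bytes, i);
 *	} while (!copied);	// page faulted out again: fault in, retry
 */
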
/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_safe_writeable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                .copy_mc = false,
                .nofault = false,
                .user_backed = true,
                .data_source = direction,
                .__iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_init);

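/*
 * Minimal iov_iter_init() usage sketch (illustrative; ubuf/len are
 * made-up names): describe a single user buffer as the destination of a
 * transfer, the way a read(2)-style path would.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	// READ: data will be copied *into* ubuf via copy_to_iter() & co.
 */
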
// returns the offset in the partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
        struct pipe_inode_info *pipe = i->pipe;
        int used = pipe->head - pipe->tail;
        int off = i->last_offset;

        *npages = max((int)pipe->max_usage - used, 0);

        if (off > 0 && off < PAGE_SIZE) { // anon and not full
                (*npages)++;
                return off;
        }
        return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                if (!page)
                        return bytes - n;
                memcpy_to_page(page, off, addr, chunk);
                addr += chunk;
        }
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct iov_iter *i, __wsum *sump)
{
        __wsum sum = *sump;
        size_t off = 0;
        unsigned int chunk, r;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &r);
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - r);
                p = kmap_local_page(page);
                sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
                kunmap_local(p);
                off += chunk;
                bytes -= chunk;
        }
        *sump = sum;
        return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyout(base, addr + off, len),
                memcpy(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

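/*
 * Callers normally reach _copy_to_iter() through the copy_to_iter()
 * inline wrapper in <linux/uio.h>.  A short-copy check in a read-style
 * handler might look like this (illustrative sketch; kbuf/len/to are
 * made-up names):
 *
 *	size_t copied = copy_to_iter(kbuf, len, to);
 *
 *	if (copied != len)
 *		return -EFAULT;	// a userspace page wasn't writable
 */
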
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        size_t xfer = 0;
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &off);
                unsigned long rem;
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - off);
                p = kmap_local_page(page);
                rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
                chunk -= rem;
                kunmap_local(p);
                xfer += chunk;
                bytes -= chunk;
                if (rem) {
                        iov_iter_revert(i, rem);
                        break;
                }
        }
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
 * block layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        __iterate_and_advance(i, bytes, base, len, off,
                copyout_mc(base, addr + off, len),
                copy_mc_to_kernel(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
                                 size_t size)
{
        if (iov_iter_is_copy_mc(i))
                return (void *)copy_mc_to_kernel(to, from, size);
        return memcpy(to, from, size);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyin(addr + off, base, len),
                memcpy_from_iter(i, addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_inatomic_nocache(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_flushcache(addr + off, base, len),
                memcpy_flushcache(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (WARN_ON(n > v || v > page_size(head)))
                return false;
        return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (!page_copy_sane(page, offset, bytes))
                return 0;
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                n = _copy_to_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
                                 struct iov_iter *i)
{
        size_t res = 0;

        if (!page_copy_sane(page, offset, bytes))
                return 0;
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

                iterate_and_advance(i, n, base, len, off,
                        copyout_nofault(base, kaddr + offset + off, len),
                        memcpy(base, kaddr + offset + off, len)
                )
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (!page_copy_sane(page, offset, bytes))
                return 0;
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                n = _copy_from_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        unsigned int chunk, off;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                char *p;

                if (!page)
                        return bytes - n;
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                p = kmap_local_page(page);
                memset(p + off, 0, chunk);
                kunmap_local(p);
        }
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, base, len, count,
                clear_user(base, len),
                memset(base, 0, len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
                                  struct iov_iter *i)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (!page_copy_sane(page, offset, bytes)) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (WARN_ON_ONCE(!i->data_source)) {
                kunmap_atomic(kaddr);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                copyin(p + off, base, len),
                memcpy_from_iter(i, p + off, base, len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        int off = i->last_offset;

        if (!off && !size) {
                pipe_discard_from(pipe, i->start_head); // discard everything
                return;
        }
        i->count -= size;
        while (1) {
                struct pipe_buffer *buf = pipe_buf(pipe, i->head);
                if (off) /* make it relative to the beginning of buffer */
                        size += abs(off) - buf->offset;
                if (size <= buf->len) {
                        buf->len = size;
                        i->last_offset = last_offset(buf);
                        break;
                }
                size -= buf->len;
                i->head++;
                off = 0;
        }
        pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        const struct bio_vec *bvec, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset;

        for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
                if (likely(size < bvec->bv_len))
                        break;
                size -= bvec->bv_len;
        }
        i->iov_offset = size;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - iter_iov(i);
        i->__iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_pipe(i)) {
                pipe_advance(i, size);
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int head = pipe->head;

                while (head > i->start_head) {
                        struct pipe_buffer *b = pipe_buf(pipe, --head);
                        if (unroll < b->len) {
                                b->len -= unroll;
                                i->last_offset = last_offset(b);
                                i->head = head;
                                return;
                        }
                        unroll -= b->len;
                        pipe_buf_release(pipe, b);
                        pipe->head--;
                }
                i->last_offset = 0;
                i->head = head;
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
                BUG(); /* We should never go beyond the start of the specified
                        * range since we might then be straying into pages that
                        * aren't pinned.
                        */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = iter_iov(i);
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->__iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);

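/*
 * iov_iter_advance() and iov_iter_revert() are meant to pair up: code
 * that consumed bytes speculatively can hand them back on failure.  An
 * illustrative sketch (process() is a stand-in, not a real helper):
 *
 *	size_t n = copy_from_iter(buf, len, i);	// advances i by n
 *
 *	if (process(buf, n) < 0)
 *		iov_iter_revert(i, n);		// give the bytes back
 *
 * Reverting past the original start of the iterator is a bug, hence the
 * BUG() for ITER_UBUF/ITER_XARRAY above.
 */
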
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                        return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
        return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
                .copy_mc = false,
                .data_source = direction,
                .kvec = kvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_kvec);

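/*
 * Minimal iov_iter_kvec() sketch (illustrative; kbuf/len are made-up
 * names): describe one kernel buffer as the source of a transfer, much
 * as kernel_sendmsg() does internally before handing the iter to
 * ->sendmsg().
 *
 *	struct kvec vec = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, &vec, 1, len);
 */
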
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
                .copy_mc = false,
                .data_source = direction,
                .bvec = bvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        *i = (struct iov_iter){
                .iter_type = ITER_PIPE,
                .data_source = false,
                .pipe = pipe,
                .head = pipe->head,
                .start_head = pipe->head,
                .last_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller *must* prevent the
 * pages from evaporating, either by taking a ref on them or by locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        BUG_ON(direction & ~1);
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
                .copy_mc = false,
                .data_source = direction,
                .xarray = xarray,
                .xarray_start = start,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
                .copy_mc = false,
                .data_source = false,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
                                   unsigned len_mask)
{
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                const struct iovec *iov = iter_iov(i) + k;
                size_t len = iov->iov_len - skip;

                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
                if ((unsigned long)(iov->iov_base + skip) & addr_mask)
                        return false;

                size -= len;
                if (!size)
                        break;
        }
        return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
                                  unsigned len_mask)
{
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;

                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
                if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
                        return false;

                size -= len;
                if (!size)
                        break;
        }
        return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *      are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
                         unsigned len_mask)
{
        if (likely(iter_is_ubuf(i))) {
                if (i->count & len_mask)
                        return false;
                if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
                        return false;
                return true;
        }

        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_aligned_iovec(i, addr_mask, len_mask);

        if (iov_iter_is_bvec(i))
                return iov_iter_aligned_bvec(i, addr_mask, len_mask);

        if (iov_iter_is_pipe(i)) {
                size_t size = i->count;

                if (size & len_mask)
                        return false;
                if (size && i->last_offset > 0) {
                        if (i->last_offset & addr_mask)
                                return false;
                }

                return true;
        }

        if (iov_iter_is_xarray(i)) {
                if (i->count & len_mask)
                        return false;
                if ((i->xarray_start + i->iov_offset) & addr_mask)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

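/*
 * An alignment-gate sketch in the style of the block layer's direct-I/O
 * checks (illustrative; real block-layer callers use the device's DMA
 * alignment for the address mask rather than the logical block size):
 *
 *	unsigned int lbs = bdev_logical_block_size(bdev);
 *
 *	if (!iov_iter_is_aligned(iter, lbs - 1, lbs - 1))
 *		return -EINVAL;	// segment address or length misaligned
 */
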
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                const struct iovec *iov = iter_iov(i) + k;
                size_t len = iov->iov_len - skip;
                if (len) {
                        res |= (unsigned long)iov->iov_base + skip;
                        if (len > size)
                                len = size;
                        res |= len;
                        size -= len;
                        if (!size)
                                break;
                }
        }
        return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
        unsigned res = 0;
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                res |= (unsigned long)i->bvec[k].bv_offset + skip;
                if (len > size)
                        len = size;
                res |= len;
                size -= len;
                if (!size)
                        break;
        }
        return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        if (likely(iter_is_ubuf(i))) {
                size_t size = i->count;
                if (size)
                        return ((unsigned long)i->ubuf + i->iov_offset) | size;
                return 0;
        }

        /* iovec and kvec have identical layouts */
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_alignment_iovec(i);

        if (iov_iter_is_bvec(i))
                return iov_iter_alignment_bvec(i);

        if (iov_iter_is_pipe(i)) {
                size_t size = i->count;

                if (size && i->last_offset > 0)
                        return size | i->last_offset;
                return size;
        }

        if (iov_iter_is_xarray(i))
                return (i->xarray_start + i->iov_offset) | i->count;

        return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        unsigned long v = 0;
        size_t size = i->count;
        unsigned k;

        if (iter_is_ubuf(i))
                return 0;

        if (WARN_ON(!iter_is_iovec(i)))
                return ~0U;

        for (k = 0; k < i->nr_segs; k++) {
                const struct iovec *iov = iter_iov(i) + k;
                if (iov->iov_len) {
                        unsigned long base = (unsigned long)iov->iov_base;
                        if (v) // if not the first one
                                res |= base | v; // this start | previous end
                        v = base + iov->iov_len;
                        if (size <= iov->iov_len)
                                break;
                        size -= iov->iov_len;
                }
        }
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
                            size_t start, unsigned int maxpages)
{
        unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

        if (count > maxpages)
                count = maxpages;
        WARN_ON(!count);        // caller should've prevented that
        if (!*res) {
                *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
                if (!*res)
                        return 0;
        }
        return count;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page ***pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned int npages, count, off, chunk;
        struct page **p;
        size_t left;

        if (!sanity(i))
                return -EFAULT;

        *start = off = pipe_npages(i, &npages);
        if (!npages)
                return -EFAULT;
        count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
        if (!count)
                return -ENOMEM;
        p = *pages;
        for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
                struct page *page = append_pipe(i, left, &off);
                if (!page)
                        break;
                chunk = min_t(size_t, left, PAGE_SIZE - off);
                get_page(*p++ = page);
        }
        if (!npages)
                return -EFAULT;
        return maxsize - left;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
                                          pgoff_t index, unsigned int nr_pages)
{
        XA_STATE(xas, xa, index);
        struct page *page;
        unsigned int ret = 0;

        rcu_read_lock();
        for (page = xas_load(&xas); page; page = xas_next(&xas)) {
                if (xas_retry(&xas, page))
                        continue;

                /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas))) {
                        xas_reset(&xas);
                        continue;
                }

                pages[ret] = find_subpage(page, xas.xa_index);
                get_page(pages[ret]);
                if (++ret == nr_pages)
                        break;
        }
        rcu_read_unlock();
        return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
                                     struct page ***pages, size_t maxsize,
                                     unsigned maxpages, size_t *_start_offset)
{
        unsigned nr, offset, count;
        pgoff_t index;
        loff_t pos;

        pos = i->xarray_start + i->iov_offset;
        index = pos >> PAGE_SHIFT;
        offset = pos & ~PAGE_MASK;
        *_start_offset = offset;

        count = want_pages_array(pages, maxsize, offset, maxpages);
        if (!count)
                return -ENOMEM;
        nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
        if (nr == 0)
                return 0;

        maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
        i->iov_offset += maxsize;
        i->count -= maxsize;
        return maxsize;
}

1453 /* must be called on a non-empty ITER_UBUF or ITER_IOVEC iterator */
1454 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1455 {
1456         size_t skip;
1457         long k;
1458
1459         if (iter_is_ubuf(i))
1460                 return (unsigned long)i->ubuf + i->iov_offset;
1461
1462         for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1463                 const struct iovec *iov = iter_iov(i) + k;
1464                 size_t len = iov->iov_len - skip;
1465
1466                 if (unlikely(!len))
1467                         continue;
1468                 if (*size > len)
1469                         *size = len;
1470                 return (unsigned long)iov->iov_base + skip;
1471         }
1472         BUG(); // if it had been empty, we wouldn't get called
1473 }
1474
1475 /* must be called on a non-empty ITER_BVEC iterator */
1476 static struct page *first_bvec_segment(const struct iov_iter *i,
1477                                        size_t *size, size_t *start)
1478 {
1479         struct page *page;
1480         size_t skip = i->iov_offset, len;
1481
1482         len = i->bvec->bv_len - skip;
1483         if (*size > len)
1484                 *size = len;
1485         skip += i->bvec->bv_offset;
1486         page = i->bvec->bv_page + skip / PAGE_SIZE;
1487         *start = skip % PAGE_SIZE;
1488         return page;
1489 }
1490
1491 static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1492                    struct page ***pages, size_t maxsize,
1493                    unsigned int maxpages, size_t *start,
1494                    iov_iter_extraction_t extraction_flags)
1495 {
1496         unsigned int n, gup_flags = 0;
1497
1498         if (maxsize > i->count)
1499                 maxsize = i->count;
1500         if (!maxsize)
1501                 return 0;
1502         if (maxsize > MAX_RW_COUNT)
1503                 maxsize = MAX_RW_COUNT;
1504         if (extraction_flags & ITER_ALLOW_P2PDMA)
1505                 gup_flags |= FOLL_PCI_P2PDMA;
1506
1507         if (likely(user_backed_iter(i))) {
1508                 unsigned long addr;
1509                 int res;
1510
1511                 if (iov_iter_rw(i) != WRITE)
1512                         gup_flags |= FOLL_WRITE;
1513                 if (i->nofault)
1514                         gup_flags |= FOLL_NOFAULT;
1515
1516                 addr = first_iovec_segment(i, &maxsize);
1517                 *start = addr % PAGE_SIZE;
1518                 addr &= PAGE_MASK;
1519                 n = want_pages_array(pages, maxsize, *start, maxpages);
1520                 if (!n)
1521                         return -ENOMEM;
1522                 res = get_user_pages_fast(addr, n, gup_flags, *pages);
1523                 if (unlikely(res <= 0))
1524                         return res;
1525                 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1526                 iov_iter_advance(i, maxsize);
1527                 return maxsize;
1528         }
1529         if (iov_iter_is_bvec(i)) {
1530                 struct page **p;
1531                 struct page *page;
1532
1533                 page = first_bvec_segment(i, &maxsize, start);
1534                 n = want_pages_array(pages, maxsize, *start, maxpages);
1535                 if (!n)
1536                         return -ENOMEM;
1537                 p = *pages;
1538                 for (int k = 0; k < n; k++)
1539                         get_page(p[k] = page + k);
1540                 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1541                 i->count -= maxsize;
1542                 i->iov_offset += maxsize;
1543                 if (i->iov_offset == i->bvec->bv_len) {
1544                         i->iov_offset = 0;
1545                         i->bvec++;
1546                         i->nr_segs--;
1547                 }
1548                 return maxsize;
1549         }
1550         if (iov_iter_is_pipe(i))
1551                 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1552         if (iov_iter_is_xarray(i))
1553                 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1554         return -EFAULT;
1555 }
1556
1557 ssize_t iov_iter_get_pages(struct iov_iter *i,
1558                    struct page **pages, size_t maxsize, unsigned maxpages,
1559                    size_t *start, iov_iter_extraction_t extraction_flags)
1560 {
1561         if (!maxpages)
1562                 return 0;
1563         BUG_ON(!pages);
1564
1565         return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
1566                                           start, extraction_flags);
1567 }
1568 EXPORT_SYMBOL_GPL(iov_iter_get_pages);
1569
1570 ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
1571                 size_t maxsize, unsigned maxpages, size_t *start)
1572 {
1573         return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
1574 }
1575 EXPORT_SYMBOL(iov_iter_get_pages2);
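
/*
 * Usage sketch (hypothetical caller): filling a fixed-size, caller-supplied
 * page array with iov_iter_get_pages2().  The pages come back with a
 * reference each and the iterator is advanced past the bytes returned, so
 * the caller drops the references with put_page() once the I/O completes:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages2(iter, pages, len, ARRAY_SIZE(pages), &off);
 *	if (n <= 0)
 *		return n;
 *	... do I/O on n bytes starting at offset off into pages[0] ...
 *	for (int k = 0; k < DIV_ROUND_UP(n + off, PAGE_SIZE); k++)
 *		put_page(pages[k]);
 */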
1576
1577 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1578                    struct page ***pages, size_t maxsize,
1579                    size_t *start, iov_iter_extraction_t extraction_flags)
1580 {
1581         ssize_t len;
1582
1583         *pages = NULL;
1584
1585         len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
1586                                          extraction_flags);
1587         if (len <= 0) {
1588                 kvfree(*pages);
1589                 *pages = NULL;
1590         }
1591         return len;
1592 }
1593 EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
1594
1595 ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
1596                 struct page ***pages, size_t maxsize, size_t *start)
1597 {
1598         return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
1599 }
1600 EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
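
/*
 * Usage sketch for the allocating variant (hypothetical caller): here the
 * helper allocates the page array itself, so on top of the put_page() calls
 * shown above the caller also releases the array with kvfree():
 *
 *	struct page **pages;
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages_alloc2(iter, &pages, len, &off);
 *	if (n <= 0)
 *		return n;
 *	... do I/O, then put_page() each of the
 *	    DIV_ROUND_UP(n + off, PAGE_SIZE) pages ...
 *	kvfree(pages);
 */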
1601
1602 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1603                                struct iov_iter *i)
1604 {
1605         __wsum sum, next;
1606         sum = *csum;
1607         if (WARN_ON_ONCE(!i->data_source))
1608                 return 0;
1609
1610         iterate_and_advance(i, bytes, base, len, off, ({
1611                 next = csum_and_copy_from_user(base, addr + off, len);
1612                 sum = csum_block_add(sum, next, off);
1613                 next ? 0 : len;
1614         }), ({
1615                 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1616         })
1617         )
1618         *csum = sum;
1619         return bytes;
1620 }
1621 EXPORT_SYMBOL(csum_and_copy_from_iter);
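
/*
 * Usage sketch (hypothetical; buf, len and iter are stand-ins): copy len
 * bytes out of a source iterator while folding them into a rolling
 * checksum, in the style of the networking callers of this helper:
 *
 *	__wsum csum = 0;
 *
 *	if (csum_and_copy_from_iter(buf, len, &csum, iter) != len)
 *		return -EFAULT;
 *	... csum now covers the len bytes copied ...
 */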
1622
1623 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1624                              struct iov_iter *i)
1625 {
1626         struct csum_state *csstate = _csstate;
1627         __wsum sum, next;
1628
1629         if (WARN_ON_ONCE(i->data_source))
1630                 return 0;
1631         if (unlikely(iov_iter_is_discard(i))) {
1632                 // can't use csum_and_memcpy() for that one - data is not copied
1633                 csstate->csum = csum_block_add(csstate->csum,
1634                                                csum_partial(addr, bytes, 0),
1635                                                csstate->off);
1636                 csstate->off += bytes;
1637                 return bytes;
1638         }
1639
1640         sum = csum_shift(csstate->csum, csstate->off);
1641         if (unlikely(iov_iter_is_pipe(i)))
1642                 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1643         else iterate_and_advance(i, bytes, base, len, off, ({
1644                 next = csum_and_copy_to_user(addr + off, base, len);
1645                 sum = csum_block_add(sum, next, off);
1646                 next ? 0 : len;
1647         }), ({
1648                 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1649         })
1650         )
1651         csstate->csum = csum_shift(sum, csstate->off);
1652         csstate->off += bytes;
1653         return bytes;
1654 }
1655 EXPORT_SYMBOL(csum_and_copy_to_iter);
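
/*
 * Usage sketch for the destination side (hypothetical): the caller threads
 * a struct csum_state through the copy so the rolling checksum stays
 * correctly aligned across successive partial copies:
 *
 *	struct csum_state csstate = { .csum = 0, .off = 0 };
 *
 *	if (csum_and_copy_to_iter(buf, len, &csstate, iter) != len)
 *		return -EFAULT;
 *	... csstate.csum now covers the len bytes copied ...
 */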
1656
1657 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1658                 struct iov_iter *i)
1659 {
1660 #ifdef CONFIG_CRYPTO_HASH
1661         struct ahash_request *hash = hashp;
1662         struct scatterlist sg;
1663         size_t copied;
1664
1665         copied = copy_to_iter(addr, bytes, i);
1666         sg_init_one(&sg, addr, copied);
1667         ahash_request_set_crypt(hash, &sg, NULL, copied);
1668         crypto_ahash_update(hash);
1669         return copied;
1670 #else
1671         return 0;
1672 #endif
1673 }
1674 EXPORT_SYMBOL(hash_and_copy_to_iter);
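
/*
 * Usage sketch (hypothetical, assumes CONFIG_CRYPTO_HASH; req stands in for
 * an already-initialised ahash request):
 *
 *	if (hash_and_copy_to_iter(buf, len, req, iter) != len)
 *		return -EFAULT;
 *	... caller finalises the digest with crypto_ahash_final(req) ...
 */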
1675
1676 static int iov_npages(const struct iov_iter *i, int maxpages)
1677 {
1678         size_t skip = i->iov_offset, size = i->count;
1679         const struct iovec *p;
1680         int npages = 0;
1681
1682         for (p = iter_iov(i); size; skip = 0, p++) {
1683                 unsigned offs = offset_in_page(p->iov_base + skip);
1684                 size_t len = min(p->iov_len - skip, size);
1685
1686                 if (len) {
1687                         size -= len;
1688                         npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1689                         if (unlikely(npages > maxpages))
1690                                 return maxpages;
1691                 }
1692         }
1693         return npages;
1694 }
1695
1696 static int bvec_npages(const struct iov_iter *i, int maxpages)
1697 {
1698         size_t skip = i->iov_offset, size = i->count;
1699         const struct bio_vec *p;
1700         int npages = 0;
1701
1702         for (p = i->bvec; size; skip = 0, p++) {
1703                 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1704                 size_t len = min(p->bv_len - skip, size);
1705
1706                 size -= len;
1707                 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1708                 if (unlikely(npages > maxpages))
1709                         return maxpages;
1710         }
1711         return npages;
1712 }
1713
1714 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1715 {
1716         if (unlikely(!i->count))
1717                 return 0;
1718         if (likely(iter_is_ubuf(i))) {
1719                 unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1720                 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1721                 return min(npages, maxpages);
1722         }
1723         /* iovec and kvec have identical layouts */
1724         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1725                 return iov_npages(i, maxpages);
1726         if (iov_iter_is_bvec(i))
1727                 return bvec_npages(i, maxpages);
1728         if (iov_iter_is_pipe(i)) {
1729                 int npages;
1730
1731                 if (!sanity(i))
1732                         return 0;
1733
1734                 pipe_npages(i, &npages);
1735                 return min(npages, maxpages);
1736         }
1737         if (iov_iter_is_xarray(i)) {
1738                 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1739                 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1740                 return min(npages, maxpages);
1741         }
1742         return 0;
1743 }
1744 EXPORT_SYMBOL(iov_iter_npages);
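
/*
 * Usage sketch (hypothetical; bdev and opf are stand-ins): a block driver
 * sizing a bio for direct I/O before mapping the iterator's pages into it:
 *
 *	int nr_pages = iov_iter_npages(iter, BIO_MAX_VECS);
 *	struct bio *bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
 */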
1745
1746 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1747 {
1748         *new = *old;
1749         if (unlikely(iov_iter_is_pipe(new))) {
1750                 WARN_ON(1);
1751                 return NULL;
1752         }
1753         if (iov_iter_is_bvec(new))
1754                 return new->bvec = kmemdup(new->bvec,
1755                                     new->nr_segs * sizeof(struct bio_vec),
1756                                     flags);
1757         else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
1758                 /* iovec and kvec have identical layout */
1759                 return new->__iov = kmemdup(new->__iov,
1760                                    new->nr_segs * sizeof(struct iovec),
1761                                    flags);
1762         return NULL;
1763 }
1764 EXPORT_SYMBOL(dup_iter);
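
/*
 * Usage sketch (hypothetical): keeping a private copy of an iterator for
 * asynchronous completion.  NULL is returned both on allocation failure
 * and for iterator types that carry no segment array, so the error check
 * has to be type-aware; the returned pointer is what gets kfree()d:
 *
 *	struct iov_iter copy;
 *	const void *dup = dup_iter(&copy, iter, GFP_KERNEL);
 *
 *	if (!dup && (iov_iter_is_bvec(iter) || iter_is_iovec(iter) ||
 *		     iov_iter_is_kvec(iter)))
 *		return -ENOMEM;
 *	... use copy, then kfree(dup) ...
 */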
1765
1766 static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
1767                 const struct iovec __user *uvec, unsigned long nr_segs)
1768 {
1769         const struct compat_iovec __user *uiov =
1770                 (const struct compat_iovec __user *)uvec;
1771         int ret = -EFAULT, i;
1772
1773         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1774                 return -EFAULT;
1775
1776         for (i = 0; i < nr_segs; i++) {
1777                 compat_uptr_t buf;
1778                 compat_ssize_t len;
1779
1780                 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1781                 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1782
1783                 /* check for compat_size_t not fitting in compat_ssize_t */
1784                 if (len < 0) {
1785                         ret = -EINVAL;
1786                         goto uaccess_end;
1787                 }
1788                 iov[i].iov_base = compat_ptr(buf);
1789                 iov[i].iov_len = len;
1790         }
1791
1792         ret = 0;
1793 uaccess_end:
1794         user_access_end();
1795         return ret;
1796 }
1797
1798 static int copy_iovec_from_user(struct iovec *iov,
1799                 const struct iovec __user *uiov, unsigned long nr_segs)
1800 {
1801         int ret = -EFAULT;
1802
1803         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1804                 return -EFAULT;
1805
1806         do {
1807                 void __user *buf;
1808                 ssize_t len;
1809
1810                 unsafe_get_user(len, &uiov->iov_len, uaccess_end);
1811                 unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
1812
1813                 /* check for size_t not fitting in ssize_t */
1814                 if (unlikely(len < 0)) {
1815                         ret = -EINVAL;
1816                         goto uaccess_end;
1817                 }
1818                 iov->iov_base = buf;
1819                 iov->iov_len = len;
1820
1821                 uiov++; iov++;
1822         } while (--nr_segs);
1823
1824         ret = 0;
1825 uaccess_end:
1826         user_access_end();
1827         return ret;
1828 }
1829
1830 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1831                 unsigned long nr_segs, unsigned long fast_segs,
1832                 struct iovec *fast_iov, bool compat)
1833 {
1834         struct iovec *iov = fast_iov;
1835         int ret;
1836
1837         /*
1838          * SuS says "The readv() function *may* fail if the iovcnt argument was
1839          * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1840          * traditionally returned zero for zero segments, so we keep doing that.
1841          */
1842         if (nr_segs == 0)
1843                 return iov;
1844         if (nr_segs > UIO_MAXIOV)
1845                 return ERR_PTR(-EINVAL);
1846         if (nr_segs > fast_segs) {
1847                 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1848                 if (!iov)
1849                         return ERR_PTR(-ENOMEM);
1850         }
1851
1852         if (unlikely(compat))
1853                 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1854         else
1855                 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1856         if (ret) {
1857                 if (iov != fast_iov)
1858                         kfree(iov);
1859                 return ERR_PTR(ret);
1860         }
1861
1862         return iov;
1863 }
1864
1865 /*
1866  * Single segment iovec supplied by the user, import it as ITER_UBUF.
1867  */
1868 static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
1869                                    struct iovec **iovp, struct iov_iter *i,
1870                                    bool compat)
1871 {
1872         struct iovec *iov = *iovp;
1873         ssize_t ret;
1874
1875         if (compat)
1876                 ret = copy_compat_iovec_from_user(iov, uvec, 1);
1877         else
1878                 ret = copy_iovec_from_user(iov, uvec, 1);
1879         if (unlikely(ret))
1880                 return ret;
1881
1882         ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
1883         if (unlikely(ret))
1884                 return ret;
1885         *iovp = NULL;
1886         return i->count;
1887 }
1888
1889 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1890                  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1891                  struct iov_iter *i, bool compat)
1892 {
1893         ssize_t total_len = 0;
1894         unsigned long seg;
1895         struct iovec *iov;
1896
1897         if (nr_segs == 1)
1898                 return __import_iovec_ubuf(type, uvec, iovp, i, compat);
1899
1900         iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1901         if (IS_ERR(iov)) {
1902                 *iovp = NULL;
1903                 return PTR_ERR(iov);
1904         }
1905
1906         /*
1907          * According to the Single Unix Specification we should return EINVAL if
1908          * an element length is < 0 when cast to ssize_t or if the total length
1909          * would overflow the ssize_t return value of the system call.
1910          *
1911          * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1912          * overflow case.
1913          */
1914         for (seg = 0; seg < nr_segs; seg++) {
1915                 ssize_t len = (ssize_t)iov[seg].iov_len;
1916
1917                 if (!access_ok(iov[seg].iov_base, len)) {
1918                         if (iov != *iovp)
1919                                 kfree(iov);
1920                         *iovp = NULL;
1921                         return -EFAULT;
1922                 }
1923
1924                 if (len > MAX_RW_COUNT - total_len) {
1925                         len = MAX_RW_COUNT - total_len;
1926                         iov[seg].iov_len = len;
1927                 }
1928                 total_len += len;
1929         }
1930
1931         iov_iter_init(i, type, iov, nr_segs, total_len);
1932         if (iov == *iovp)
1933                 *iovp = NULL;
1934         else
1935                 *iovp = iov;
1936         return total_len;
1937 }
1938
1939 /**
1940  * import_iovec() - Copy an array of &struct iovec from userspace
1941  *     into the kernel, check that it is valid, and initialize a new
1942  *     &struct iov_iter iterator to access it.
1943  *
1944  * @type: One of %READ or %WRITE.
1945  * @uvec: Pointer to the userspace array.
1946  * @nr_segs: Number of elements in userspace array.
1947  * @fast_segs: Number of elements in @iov.
1948  * @iovp: (input and output parameter) Pointer to pointer to (usually small
1949  *     on-stack) kernel array.
1950  * @i: Pointer to iterator that will be initialized on success.
1951  *
1952  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1953  * then this function places %NULL in *@iovp on return. Otherwise, a new
1954  * array will be allocated and the result placed in *@iovp. This means that
1955  * the caller may call kfree() on *@iovp regardless of whether the small
1956  * on-stack array was used or not (and regardless of whether this function
1957  * returns an error or not).
1958  *
1959  * Return: Negative error code on error, bytes imported on success
1960  */
1961 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1962                  unsigned nr_segs, unsigned fast_segs,
1963                  struct iovec **iovp, struct iov_iter *i)
1964 {
1965         return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1966                               in_compat_syscall());
1967 }
1968 EXPORT_SYMBOL(import_iovec);
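
/*
 * Usage sketch (hypothetical caller; do_read_iter() is a stand-in for
 * whatever consumes the iterator):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read_iter(file, &iter);
 *	kfree(iov);	(safe: iov is NULL if the on-stack array sufficed)
 *	return ret;
 */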
1969
1970 int import_single_range(int rw, void __user *buf, size_t len,
1971                  struct iovec *iov, struct iov_iter *i)
1972 {
1973         if (len > MAX_RW_COUNT)
1974                 len = MAX_RW_COUNT;
1975         if (unlikely(!access_ok(buf, len)))
1976                 return -EFAULT;
1977
1978         iov_iter_ubuf(i, rw, buf, len);
1979         return 0;
1980 }
1981 EXPORT_SYMBOL(import_single_range);
1982
1983 int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
1984 {
1985         if (len > MAX_RW_COUNT)
1986                 len = MAX_RW_COUNT;
1987         if (unlikely(!access_ok(buf, len)))
1988                 return -EFAULT;
1989
1990         iov_iter_ubuf(i, rw, buf, len);
1991         return 0;
1992 }
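
/*
 * Usage sketch (hypothetical): wrapping a plain user buffer in an iterator,
 * e.g. on behalf of a read(2)-style entry point that fills the buffer:
 *
 *	struct iov_iter iter;
 *	int ret = import_ubuf(ITER_DEST, buf, len, &iter);
 *
 *	if (unlikely(ret))
 *		return ret;
 *	... hand &iter to a ->read_iter() style helper ...
 */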
1993
1994 /**
1995  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1996  *     iov_iter_save_state() was called.
1997  *
1998  * @i: &struct iov_iter to restore
1999  * @state: state to restore from
2000  *
2001  * Used after iov_iter_save_state() to restore @i, if operations may
2002  * have advanced it.
2003  *
2004  * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
2005  */
2006 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
2007 {
2008         if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
2009                          !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
2010                 return;
2011         i->iov_offset = state->iov_offset;
2012         i->count = state->count;
2013         if (iter_is_ubuf(i))
2014                 return;
2015         /*
2016          * For the *vec iters, nr_segs + iov is constant - if we increment
2017          * the vec, then we also decrement the nr_segs count. Hence we don't
2018          * need to track both of these, just one is enough and we can deduce
2019          * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
2020          * size, so we can just increment the iov pointer as they are unionized.
2021          * ITER_BVEC _may_ be the same size on some archs, but on others it is
2022          * not. Be safe and handle it separately.
2023          */
2024         BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
2025         if (iov_iter_is_bvec(i))
2026                 i->bvec -= state->nr_segs - i->nr_segs;
2027         else
2028                 i->__iov -= state->nr_segs - i->nr_segs;
2029         i->nr_segs = state->nr_segs;
2030 }
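
/*
 * Usage sketch (hypothetical; attempt_io() and attempt_io_blocking() are
 * stand-ins): rewinding after a failed non-blocking attempt so the
 * operation can be retried from the same position:
 *
 *	struct iov_iter_state state;
 *	ssize_t ret;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = attempt_io(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		ret = attempt_io_blocking(iter);
 *	}
 */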
2031
2032 /*
2033  * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This
2034  * does not get references on the pages, nor does it get a pin on them.
2035  */
2036 static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
2037                                              struct page ***pages, size_t maxsize,
2038                                              unsigned int maxpages,
2039                                              iov_iter_extraction_t extraction_flags,
2040                                              size_t *offset0)
2041 {
2042         struct page *page, **p;
2043         unsigned int nr = 0, offset;
2044         loff_t pos = i->xarray_start + i->iov_offset;
2045         pgoff_t index = pos >> PAGE_SHIFT;
2046         XA_STATE(xas, i->xarray, index);
2047
2048         offset = pos & ~PAGE_MASK;
2049         *offset0 = offset;
2050
2051         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2052         if (!maxpages)
2053                 return -ENOMEM;
2054         p = *pages;
2055
2056         rcu_read_lock();
2057         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
2058                 if (xas_retry(&xas, page))
2059                         continue;
2060
2061                 /* Has the page moved or been split? */
2062                 if (unlikely(page != xas_reload(&xas))) {
2063                         xas_reset(&xas);
2064                         continue;
2065                 }
2066
2067                 p[nr++] = find_subpage(page, xas.xa_index);
2068                 if (nr == maxpages)
2069                         break;
2070         }
2071         rcu_read_unlock();
2072
2073         maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
2074         iov_iter_advance(i, maxsize);
2075         return maxsize;
2076 }
2077
2078 /*
2079  * Extract a list of contiguous pages from an ITER_BVEC iterator.  This does
2080  * not get references on the pages, nor does it get a pin on them.
2081  */
2082 static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
2083                                            struct page ***pages, size_t maxsize,
2084                                            unsigned int maxpages,
2085                                            iov_iter_extraction_t extraction_flags,
2086                                            size_t *offset0)
2087 {
2088         struct page **p, *page;
2089         size_t skip = i->iov_offset, offset;
2090         int k;
2091
2092         for (;;) {
2093                 if (i->nr_segs == 0)
2094                         return 0;
2095                 maxsize = min(maxsize, i->bvec->bv_len - skip);
2096                 if (maxsize)
2097                         break;
2098                 i->iov_offset = 0;
2099                 i->nr_segs--;
2100                 i->bvec++;
2101                 skip = 0;
2102         }
2103
2104         skip += i->bvec->bv_offset;
2105         page = i->bvec->bv_page + skip / PAGE_SIZE;
2106         offset = skip % PAGE_SIZE;
2107         *offset0 = offset;
2108
2109         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2110         if (!maxpages)
2111                 return -ENOMEM;
2112         p = *pages;
2113         for (k = 0; k < maxpages; k++)
2114                 p[k] = page + k;
2115
2116         maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
2117         iov_iter_advance(i, maxsize);
2118         return maxsize;
2119 }
2120
2121 /*
2122  * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
2123  * This does not get references on the pages, nor does it get a pin on them.
2124  */
2125 static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
2126                                            struct page ***pages, size_t maxsize,
2127                                            unsigned int maxpages,
2128                                            iov_iter_extraction_t extraction_flags,
2129                                            size_t *offset0)
2130 {
2131         struct page **p, *page;
2132         const void *kaddr;
2133         size_t skip = i->iov_offset, offset, len;
2134         int k;
2135
2136         for (;;) {
2137                 if (i->nr_segs == 0)
2138                         return 0;
2139                 maxsize = min(maxsize, i->kvec->iov_len - skip);
2140                 if (maxsize)
2141                         break;
2142                 i->iov_offset = 0;
2143                 i->nr_segs--;
2144                 i->kvec++;
2145                 skip = 0;
2146         }
2147
2148         kaddr = i->kvec->iov_base + skip;
2149         offset = (unsigned long)kaddr & ~PAGE_MASK;
2150         *offset0 = offset;
2151
2152         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2153         if (!maxpages)
2154                 return -ENOMEM;
2155         p = *pages;
2156
2157         kaddr -= offset;
2158         len = offset + maxsize;
2159         for (k = 0; k < maxpages; k++) {
2160                 size_t seg = min_t(size_t, len, PAGE_SIZE);
2161
2162                 if (is_vmalloc_or_module_addr(kaddr))
2163                         page = vmalloc_to_page(kaddr);
2164                 else
2165                         page = virt_to_page(kaddr);
2166
2167                 p[k] = page;
2168                 len -= seg;
2169                 kaddr += PAGE_SIZE;
2170         }
2171
2172         maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
2173         iov_iter_advance(i, maxsize);
2174         return maxsize;
2175 }
2176
2177 /*
2178  * Extract a list of contiguous pages from a user iterator and get a pin on
2179  * each of them.  This should only be used if the iterator is user-backed
2180  * (ITER_IOVEC/ITER_UBUF).
2181  *
2182  * It does not get refs on the pages, but the pages must be unpinned by the
2183  * caller once the transfer is complete.
2184  *
2185  * This is safe to be used where background IO/DMA *is* going to be modifying
2186  * the buffer; using a pin rather than a ref forces fork() to give the
2187  * child a copy of the page.
2188  */
2189 static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
2190                                            struct page ***pages,
2191                                            size_t maxsize,
2192                                            unsigned int maxpages,
2193                                            iov_iter_extraction_t extraction_flags,
2194                                            size_t *offset0)
2195 {
2196         unsigned long addr;
2197         unsigned int gup_flags = 0;
2198         size_t offset;
2199         int res;
2200
2201         if (i->data_source == ITER_DEST)
2202                 gup_flags |= FOLL_WRITE;
2203         if (extraction_flags & ITER_ALLOW_P2PDMA)
2204                 gup_flags |= FOLL_PCI_P2PDMA;
2205         if (i->nofault)
2206                 gup_flags |= FOLL_NOFAULT;
2207
2208         addr = first_iovec_segment(i, &maxsize);
2209         *offset0 = offset = addr % PAGE_SIZE;
2210         addr &= PAGE_MASK;
2211         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2212         if (!maxpages)
2213                 return -ENOMEM;
2214         res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
2215         if (unlikely(res <= 0))
2216                 return res;
2217         maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
2218         iov_iter_advance(i, maxsize);
2219         return maxsize;
2220 }
2221
2222 /**
2223  * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
2224  * @i: The iterator to extract from
2225  * @pages: Where to return the list of pages
2226  * @maxsize: The maximum amount of iterator to extract
2227  * @maxpages: The maximum size of the list of pages
2228  * @extraction_flags: Flags to qualify request
2229  * @offset0: Where to return the starting offset into (*@pages)[0]
2230  *
2231  * Extract a list of contiguous pages from the current point of the iterator,
2232  * advancing the iterator.  The maximum number of pages and the maximum amount
2233  * of page contents can be set.
2234  *
2235  * If *@pages is NULL, a page list will be allocated to the required size and
2236  * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
2237  * that the caller allocated a page list at least @maxpages in size and this
2238  * will be filled in.
2239  *
2240  * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
2241  * be allowed on the pages extracted.
2242  *
2243  * The iov_iter_extract_will_pin() function can be used to query how cleanup
2244  * should be performed.
2245  *
2246  * Extra refs or pins on the pages may be obtained as follows:
2247  *
2248  *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
2249  *      added to the pages, but refs will not be taken.
2250  *      iov_iter_extract_will_pin() will return true.
2251  *
2252  *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
2253  *      merely listed; no extra refs or pins are obtained.
2254  *      iov_iter_extract_will_pin() will return false.
2255  *
2256  * Note also:
2257  *
2258  *  (*) Use with ITER_DISCARD is not supported as that has no content.
2259  *
2260  * On success, the function sets *@pages to the new pagelist, if allocated, and
2261  * sets *offset0 to the offset into the first page.
2262  *
2263  * It may also return -ENOMEM or -EFAULT.
2264  */
2265 ssize_t iov_iter_extract_pages(struct iov_iter *i,
2266                                struct page ***pages,
2267                                size_t maxsize,
2268                                unsigned int maxpages,
2269                                iov_iter_extraction_t extraction_flags,
2270                                size_t *offset0)
2271 {
2272         maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
2273         if (!maxsize)
2274                 return 0;
2275
2276         if (likely(user_backed_iter(i)))
2277                 return iov_iter_extract_user_pages(i, pages, maxsize,
2278                                                    maxpages, extraction_flags,
2279                                                    offset0);
2280         if (iov_iter_is_kvec(i))
2281                 return iov_iter_extract_kvec_pages(i, pages, maxsize,
2282                                                    maxpages, extraction_flags,
2283                                                    offset0);
2284         if (iov_iter_is_bvec(i))
2285                 return iov_iter_extract_bvec_pages(i, pages, maxsize,
2286                                                    maxpages, extraction_flags,
2287                                                    offset0);
2288         if (iov_iter_is_xarray(i))
2289                 return iov_iter_extract_xarray_pages(i, pages, maxsize,
2290                                                      maxpages, extraction_flags,
2291                                                      offset0);
2292         return -EFAULT;
2293 }
2294 EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
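
/*
 * Usage sketch (hypothetical caller; submit_dma() is a stand-in).  The
 * cleanup side must honour iov_iter_extract_will_pin():
 *
 *	struct page **pages = NULL;
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_extract_pages(iter, &pages, len, UINT_MAX, 0, &off);
 *	if (n <= 0)
 *		return n;
 *	submit_dma(pages, n, off);
 *	if (iov_iter_extract_will_pin(iter))
 *		unpin_user_pages(pages, DIV_ROUND_UP(n + off, PAGE_SIZE));
 *	kvfree(pages);
 */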