1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
8 #include <linux/export.h>
9 #include <linux/slab.h>
11 #include <linux/uio.h>
12 #include <linux/scatterlist.h>
13 #include <linux/netfs.h>
17 * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
18 * @orig: The original iterator
19 * @orig_len: The amount of iterator to copy
20 * @new: The iterator to be set up
21 * @extraction_flags: Flags to qualify the request
23 * Extract the page fragments from the given amount of the source iterator and
24 * build up a second iterator that refers to all of those bits. This allows
 * the original iterator to be disposed of.
27 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA be
28 * allowed on the pages extracted.
30 * On success, the number of elements in the bvec is returned, the original
31 * iterator will have been advanced by the amount extracted.
33 * The iov_iter_extract_mode() function should be used to query how cleanup
34 * should be performed.
36 ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
38 iov_iter_extraction_t extraction_flags)
40 struct bio_vec *bv = NULL;
42 unsigned int cur_npages;
43 unsigned int max_pages;
44 unsigned int npages = 0;
47 size_t count = orig_len, offset, len;
48 size_t bv_size, pg_size;
50 if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
53 max_pages = iov_iter_npages(orig, INT_MAX);
54 bv_size = array_size(max_pages, sizeof(*bv));
55 bv = kvmalloc(bv_size, GFP_KERNEL);
59 /* Put the page list at the end of the bvec list storage. bvec
60 * elements are larger than page pointers, so as long as we work
61 * 0->last, we should be fine.
63 pg_size = array_size(max_pages, sizeof(*pages));
64 pages = (void *)bv + bv_size - pg_size;
66 while (count && npages < max_pages) {
67 ret = iov_iter_extract_pages(orig, &pages, count,
68 max_pages - npages, extraction_flags,
71 pr_err("Couldn't get user pages (rc=%zd)\n", ret);
76 pr_err("get_pages rc=%zd more than %zu\n", ret, count);
82 cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);
84 if (npages + cur_npages > max_pages) {
85 pr_err("Out of bvec array capacity (%u vs %u)\n",
86 npages + cur_npages, max_pages);
90 for (i = 0; i < cur_npages; i++) {
91 len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
92 bv[npages + i].bv_page = *pages++;
93 bv[npages + i].bv_offset = offset;
94 bv[npages + i].bv_len = len - offset;
102 iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
105 EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
108 * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
109 * iterators, and add them to the scatterlist.
111 static ssize_t netfs_extract_user_to_sg(struct iov_iter *iter,
113 struct sg_table *sgtable,
115 iov_iter_extraction_t extraction_flags)
117 struct scatterlist *sg = sgtable->sgl + sgtable->nents;
120 ssize_t ret = 0, res;
123 /* We decant the page list into the tail of the scatterlist */
124 pages = (void *)sgtable->sgl + array_size(sg_max, sizeof(struct scatterlist));
128 res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
129 extraction_flags, &off);
136 npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
139 for (; npages < 0; npages--) {
140 struct page *page = *pages;
141 size_t seg = min_t(size_t, PAGE_SIZE - off, len);
144 sg_set_page(sg, page, len, off);
150 } while (maxsize > 0 && sg_max > 0);
155 while (sgtable->nents > sgtable->orig_nents)
156 put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
161 * Extract up to sg_max pages from a BVEC-type iterator and add them to the
162 * scatterlist. The pages are not pinned.
164 static ssize_t netfs_extract_bvec_to_sg(struct iov_iter *iter,
166 struct sg_table *sgtable,
168 iov_iter_extraction_t extraction_flags)
170 const struct bio_vec *bv = iter->bvec;
171 struct scatterlist *sg = sgtable->sgl + sgtable->nents;
172 unsigned long start = iter->iov_offset;
176 for (i = 0; i < iter->nr_segs; i++) {
185 len = min_t(size_t, maxsize, len - start);
186 off = bv[i].bv_offset + start;
188 sg_set_page(sg, bv[i].bv_page, len, off);
195 if (maxsize <= 0 || sg_max == 0)
201 iov_iter_advance(iter, ret);
206 * Extract up to sg_max pages from a KVEC-type iterator and add them to the
207 * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or
208 * static buffers. The pages are not pinned.
210 static ssize_t netfs_extract_kvec_to_sg(struct iov_iter *iter,
212 struct sg_table *sgtable,
214 iov_iter_extraction_t extraction_flags)
216 const struct kvec *kv = iter->kvec;
217 struct scatterlist *sg = sgtable->sgl + sgtable->nents;
218 unsigned long start = iter->iov_offset;
222 for (i = 0; i < iter->nr_segs; i++) {
225 size_t off, len, seg;
233 kaddr = (unsigned long)kv[i].iov_base + start;
234 off = kaddr & ~PAGE_MASK;
235 len = min_t(size_t, maxsize, len - start);
241 seg = min_t(size_t, len, PAGE_SIZE - off);
242 if (is_vmalloc_or_module_addr((void *)kaddr))
243 page = vmalloc_to_page((void *)kaddr);
245 page = virt_to_page(kaddr);
247 sg_set_page(sg, page, len, off);
255 } while (len > 0 && sg_max > 0);
257 if (maxsize <= 0 || sg_max == 0)
263 iov_iter_advance(iter, ret);
268 * Extract up to sg_max folios from an XARRAY-type iterator and add them to
269 * the scatterlist. The pages are not pinned.
271 static ssize_t netfs_extract_xarray_to_sg(struct iov_iter *iter,
273 struct sg_table *sgtable,
275 iov_iter_extraction_t extraction_flags)
277 struct scatterlist *sg = sgtable->sgl + sgtable->nents;
278 struct xarray *xa = iter->xarray;
280 loff_t start = iter->xarray_start + iter->iov_offset;
281 pgoff_t index = start / PAGE_SIZE;
284 XA_STATE(xas, xa, index);
288 xas_for_each(&xas, folio, ULONG_MAX) {
289 if (xas_retry(&xas, folio))
291 if (WARN_ON(xa_is_value(folio)))
293 if (WARN_ON(folio_test_hugetlb(folio)))
296 offset = offset_in_folio(folio, start);
297 len = min_t(size_t, maxsize, folio_size(folio) - offset);
299 sg_set_page(sg, folio_page(folio, 0), len, offset);
306 if (maxsize <= 0 || sg_max == 0)
312 iov_iter_advance(iter, ret);
 * netfs_extract_iter_to_sg - Extract pages from an iterator and add to an sglist
318 * @iter: The iterator to extract from
319 * @maxsize: The amount of iterator to copy
320 * @sgtable: The scatterlist table to fill in
321 * @sg_max: Maximum number of elements in @sgtable that may be filled
322 * @extraction_flags: Flags to qualify the request
324 * Extract the page fragments from the given amount of the source iterator and
325 * add them to a scatterlist that refers to all of those bits, to a maximum
326 * addition of @sg_max elements.
328 * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
329 * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE-
330 * and DISCARD-type are not supported.
332 * No end mark is placed on the scatterlist; that's left to the caller.
334 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
335 * be allowed on the pages extracted.
 * If successful, @sgtable->nents is updated to include the number of elements
338 * added and the number of bytes added is returned. @sgtable->orig_nents is
341 * The iov_iter_extract_mode() function should be used to query how cleanup
342 * should be performed.
344 ssize_t netfs_extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
345 struct sg_table *sgtable, unsigned int sg_max,
346 iov_iter_extraction_t extraction_flags)
351 switch (iov_iter_type(iter)) {
354 return netfs_extract_user_to_sg(iter, maxsize, sgtable, sg_max,
357 return netfs_extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
360 return netfs_extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
363 return netfs_extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
366 pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
371 EXPORT_SYMBOL_GPL(netfs_extract_iter_to_sg);