1 // SPDX-License-Identifier: GPL-2.0-only
2 /* I/O iterator tests. This can only test kernel-backed iterator types.
4 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
13 #include <linux/uio.h>
14 #include <linux/bvec.h>
15 #include <kunit/test.h>
17 MODULE_DESCRIPTION("iov_iter testing");
18 MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
19 MODULE_LICENSE("GPL");
/*
 * A [from, to) byte range inside the test buffer, used to build kvec
 * segments (see iov_kunit_load_kvec(), which reads pr->from and pr->to).
 */
21 struct kvec_test_range {
/*
 * Ranges driving the kvec-based tests.  The consuming loops iterate while
 * pr->from >= 0, so the table is presumably terminated by a negative-from
 * sentinel entry (elided from this listing) — TODO confirm.
 */
25 static const struct kvec_test_range kvec_test_ranges[] = {
/*
 * Generate a deterministic fill byte for position/counter x; used both to
 * fill source buffers and to rebuild the expected image for comparison.
 * (Body elided from this listing.)
 */
37 static inline u8 pattern(unsigned long x)
/*
 * KUnit cleanup action: undo the vmap() made by iov_kunit_create_buffer().
 * Registered via kunit_add_action_or_reset(); data is the mapped address.
 * (Body elided from this listing — presumably calls vunmap(); confirm.)
 */
42 static void iov_kunit_unmap(void *data)
/*
 * Allocate npages pages and map them contiguously with vmap(), returning
 * the mapped address.  The page pointer array is handed back via *ppages.
 * Cleanup (unmapping, and page release via VM_MAP_PUT_PAGES) is registered
 * as a KUnit deferred action, so callers need not free anything.
 */
47 static void *__init iov_kunit_create_buffer(struct kunit *test,
48 struct page ***ppages,
55 pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
56 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
59 got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
/* Partial bulk allocation: drop what we did get before failing the test. */
61 release_pages(pages, got);
62 KUNIT_ASSERT_EQ(test, got, npages);
/* VM_MAP_PUT_PAGES makes vfree/vunmap of this mapping put the pages. */
65 buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
66 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
68 kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
/*
 * Build a kvec[] array from the test range table @pr, with each segment
 * pointing at buffer[from..to), and initialise *iter as an ITER_KVEC
 * iterator of direction @dir over the accumulated size.  At most @kvmax
 * segments are used; the loop presumably also stops at the table's
 * sentinel entry (elided) — TODO confirm.
 */
72 static void __init iov_kunit_load_kvec(struct kunit *test,
73 struct iov_iter *iter, int dir,
74 struct kvec *kvec, unsigned int kvmax,
75 void *buffer, size_t bufsize,
76 const struct kvec_test_range *pr)
81 for (i = 0; i < kvmax; i++, pr++) {
/* Each range must be well-formed and lie within the buffer. */
84 KUNIT_ASSERT_GE(test, pr->to, pr->from);
85 KUNIT_ASSERT_LE(test, pr->to, bufsize);
86 kvec[i].iov_base = buffer + pr->from;
87 kvec[i].iov_len = pr->to - pr->from;
88 size += pr->to - pr->from;
90 KUNIT_ASSERT_LE(test, size, bufsize);
/* i is the number of segments actually filled in above. */
92 iov_iter_kvec(iter, dir, kvec, i, size);
96 * Test copying to an ITER_KVEC-type iterator.
/*
 * copy_to_iter() into an ITER_KVEC: fill a scratch buffer with pattern(),
 * copy it through a kvec-mapped iterator into a zeroed destination buffer,
 * then rebuild the expected image in scratch and compare byte by byte.
 * Several setup lines (bufsize, size, kvec, patt, i) are elided from this
 * listing; comments below are hedged where they depend on them.
 */
98 static void __init iov_kunit_copy_to_kvec(struct kunit *test)
100 const struct kvec_test_range *pr;
101 struct iov_iter iter;
102 struct page **spages, **bpages;
104 u8 *scratch, *buffer;
105 size_t bufsize, npages, size, copied;
109 npages = bufsize / PAGE_SIZE;
/* Source: scratch buffer filled with the reference pattern. */
111 scratch = iov_kunit_create_buffer(test, &spages, npages);
112 for (i = 0; i < bufsize; i++)
113 scratch[i] = pattern(i);
/* Destination: zeroed buffer that the kvec segments point into. */
115 buffer = iov_kunit_create_buffer(test, &bpages, npages);
116 memset(buffer, 0, bufsize);
118 iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
119 buffer, bufsize, kvec_test_ranges);
/* size is presumably taken from iter.count (elided) — TODO confirm. */
122 copied = copy_to_iter(scratch, size, &iter);
/* The whole iterator must have been consumed. */
124 KUNIT_EXPECT_EQ(test, copied, size);
125 KUNIT_EXPECT_EQ(test, iter.count, 0);
126 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
128 /* Build the expected image in the scratch buffer. */
130 memset(scratch, 0, bufsize);
131 for (pr = kvec_test_ranges; pr->from >= 0; pr++)
132 for (i = pr->from; i < pr->to; i++)
133 scratch[i] = pattern(patt++);
135 /* Compare the images */
136 for (i = 0; i < bufsize; i++) {
137 KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
/* Presumably breaks on first mismatch to avoid log spam (elided). */
138 if (buffer[i] != scratch[i])
146 * Test copying from an ITER_KVEC-type iterator.
/*
 * copy_from_iter() out of an ITER_KVEC: pattern-fill the kvec-mapped
 * source buffer, copy from the iterator into a zeroed scratch buffer,
 * then rebuild the expected (packed) image in the source buffer and
 * compare.  Some setup lines (kvec, i, j declarations etc.) are elided.
 */
148 static void __init iov_kunit_copy_from_kvec(struct kunit *test)
150 const struct kvec_test_range *pr;
151 struct iov_iter iter;
152 struct page **spages, **bpages;
154 u8 *scratch, *buffer;
155 size_t bufsize, npages, size, copied;
159 npages = bufsize / PAGE_SIZE;
/* Source: buffer the kvec segments point into, filled with the pattern. */
161 buffer = iov_kunit_create_buffer(test, &bpages, npages);
162 for (i = 0; i < bufsize; i++)
163 buffer[i] = pattern(i);
/* Destination: zeroed scratch buffer. */
165 scratch = iov_kunit_create_buffer(test, &spages, npages);
166 memset(scratch, 0, bufsize);
168 iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
169 buffer, bufsize, kvec_test_ranges);
170 size = min(iter.count, bufsize);
172 copied = copy_from_iter(scratch, size, &iter);
174 KUNIT_EXPECT_EQ(test, copied, size);
175 KUNIT_EXPECT_EQ(test, iter.count, 0);
176 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
178 /* Build the expected image in the main buffer. */
180 memset(buffer, 0, bufsize);
/* Ranges are packed contiguously into buffer via the running index i. */
181 for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
182 for (j = pr->from; j < pr->to; j++) {
183 buffer[i++] = pattern(j);
190 /* Compare the images */
191 for (i = 0; i < bufsize; i++) {
192 KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
/* Presumably breaks on first mismatch (elided). */
193 if (scratch[i] != buffer[i])
/*
 * A byte range within one page of the test buffer.  Per the initialisers
 * below and iov_kunit_load_bvec()'s usage (pr->page, pr->from, pr->to),
 * the fields are presumably { page index, from, to } — TODO confirm
 * against the elided member declarations.
 */
200 struct bvec_test_range {
/*
 * Ranges driving the bvec-based tests.  Note the three full pages (3-5)
 * and the adjacent entries in page 6, which exercise segment merging in
 * iov_kunit_load_bvec().  Presumably sentinel-terminated (elided).
 */
204 static const struct bvec_test_range bvec_test_ranges[] = {
205 { 0, 0x0002, 0x0002 },
206 { 1, 0x0027, 0x0893 },
207 { 2, 0x0193, 0x0794 },
208 { 3, 0x0000, 0x1000 },
209 { 4, 0x0000, 0x1000 },
210 { 5, 0x0000, 0x1000 },
211 { 6, 0x0000, 0x0ffb },
212 { 6, 0x0ffd, 0x0ffe },
/*
 * Build a bio_vec[] array from the test range table @pr, each segment
 * covering bytes [from, to) of pages[pr->page], and initialise *iter as
 * an ITER_BVEC iterator of direction @dir.  A range starting at offset 0
 * that continues exactly where the previous segment ended is merged into
 * that segment rather than starting a new one.
 */
216 static void __init iov_kunit_load_bvec(struct kunit *test,
217 struct iov_iter *iter, int dir,
218 struct bio_vec *bvec, unsigned int bvmax,
219 struct page **pages, size_t npages,
221 const struct bvec_test_range *pr)
/* can_merge: page a zero-offset follow-on range could be merged at. */
223 struct page *can_merge = NULL, *page;
227 for (i = 0; i < bvmax; i++, pr++) {
230 KUNIT_ASSERT_LT(test, pr->page, npages);
231 KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
232 KUNIT_ASSERT_GE(test, pr->from, 0);
233 KUNIT_ASSERT_GE(test, pr->to, pr->from);
234 KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
236 page = pages[pr->page];
/* Extend the previous segment instead of starting a new one. */
237 if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
239 bvec[i].bv_len += pr->to;
241 bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
244 size += pr->to - pr->from;
/* Only a range ending on a page boundary can be merged into. */
245 if ((pr->to & ~PAGE_MASK) == 0)
246 can_merge = page + pr->to / PAGE_SIZE;
251 iov_iter_bvec(iter, dir, bvec, i, size);
255 * Test copying to an ITER_BVEC-type iterator.
/*
 * copy_to_iter() into an ITER_BVEC: fill scratch with pattern(), copy it
 * through a bvec-mapped iterator into a zeroed destination buffer, then
 * rebuild the expected image per-page and compare.  Some setup lines
 * (bufsize, size, patt, b, i) are elided from this listing.
 */
257 static void __init iov_kunit_copy_to_bvec(struct kunit *test)
259 const struct bvec_test_range *pr;
260 struct iov_iter iter;
261 struct bio_vec bvec[8];
262 struct page **spages, **bpages;
263 u8 *scratch, *buffer;
264 size_t bufsize, npages, size, copied;
268 npages = bufsize / PAGE_SIZE;
/* Source: patterned scratch buffer. */
270 scratch = iov_kunit_create_buffer(test, &spages, npages);
271 for (i = 0; i < bufsize; i++)
272 scratch[i] = pattern(i);
/* Destination: zeroed buffer whose pages back the bvec segments. */
274 buffer = iov_kunit_create_buffer(test, &bpages, npages);
275 memset(buffer, 0, bufsize);
277 iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
278 bpages, npages, bufsize, bvec_test_ranges);
/* size presumably taken from iter.count (elided) — TODO confirm. */
281 copied = copy_to_iter(scratch, size, &iter);
283 KUNIT_EXPECT_EQ(test, copied, size);
284 KUNIT_EXPECT_EQ(test, iter.count, 0);
285 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
287 /* Build the expected image in the scratch buffer. */
290 memset(scratch, 0, bufsize);
291 for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
/* p points at the start of the page this range lives in. */
292 u8 *p = scratch + pr->page * PAGE_SIZE;
294 for (i = pr->from; i < pr->to; i++)
295 p[i] = pattern(patt++);
298 /* Compare the images */
299 for (i = 0; i < bufsize; i++) {
300 KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
/* Presumably breaks on first mismatch (elided). */
301 if (buffer[i] != scratch[i])
309 * Test copying from an ITER_BVEC-type iterator.
/*
 * copy_from_iter() out of an ITER_BVEC: pattern-fill the bvec-backed
 * source buffer, copy from the iterator into zeroed scratch, then rebuild
 * the expected packed image in the source buffer and compare.  Some setup
 * lines (i, j, size assignment) are elided from this listing.
 */
311 static void __init iov_kunit_copy_from_bvec(struct kunit *test)
313 const struct bvec_test_range *pr;
314 struct iov_iter iter;
315 struct bio_vec bvec[8];
316 struct page **spages, **bpages;
317 u8 *scratch, *buffer;
318 size_t bufsize, npages, size, copied;
322 npages = bufsize / PAGE_SIZE;
/* Source: patterned buffer whose pages back the bvec segments. */
324 buffer = iov_kunit_create_buffer(test, &bpages, npages);
325 for (i = 0; i < bufsize; i++)
326 buffer[i] = pattern(i);
/* Destination: zeroed scratch buffer. */
328 scratch = iov_kunit_create_buffer(test, &spages, npages);
329 memset(scratch, 0, bufsize);
331 iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
332 bpages, npages, bufsize, bvec_test_ranges);
/* size presumably taken from iter.count (elided) — TODO confirm. */
335 copied = copy_from_iter(scratch, size, &iter);
337 KUNIT_EXPECT_EQ(test, copied, size);
338 KUNIT_EXPECT_EQ(test, iter.count, 0);
339 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
341 /* Build the expected image in the main buffer. */
343 memset(buffer, 0, bufsize);
/* Ranges are packed contiguously into buffer via the running index i. */
344 for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
345 size_t patt = pr->page * PAGE_SIZE;
347 for (j = pr->from; j < pr->to; j++) {
348 buffer[i++] = pattern(patt + j);
355 /* Compare the images */
356 for (i = 0; i < bufsize; i++) {
357 KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
/* Presumably breaks on first mismatch (elided). */
358 if (scratch[i] != buffer[i])
/*
 * KUnit cleanup action for the xarray kzalloc'd by
 * iov_kunit_create_xarray().  Presumably destroys and frees it
 * (remainder of body elided from this listing).
 */
365 static void iov_kunit_destroy_xarray(void *data)
367 struct xarray *xarray = data;
/*
 * Store pages[0..npages) into @xarray at indices 0..npages and initialise
 * *iter as an ITER_XARRAY iterator of direction @dir starting at offset 0.
 * The total size accumulation is elided from this listing — presumably
 * PAGE_SIZE per page; TODO confirm.
 */
373 static void __init iov_kunit_load_xarray(struct kunit *test,
374 struct iov_iter *iter, int dir,
375 struct xarray *xarray,
376 struct page **pages, size_t npages)
381 for (i = 0; i < npages; i++) {
382 void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
384 KUNIT_ASSERT_FALSE(test, xa_is_err(x));
387 iov_iter_xarray(iter, dir, xarray, 0, size);
/*
 * Allocate a zeroed xarray for a test and register its destruction as a
 * KUnit deferred action, so the caller need not free it.  Initialisation
 * of the xarray itself and the return are elided from this listing.
 */
390 static struct xarray *iov_kunit_create_xarray(struct kunit *test)
392 struct xarray *xarray;
394 xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
396 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
397 kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
402 * Test copying to an ITER_XARRAY-type iterator.
/*
 * copy_to_iter() into an ITER_XARRAY: copy pattern data range-by-range
 * (reusing kvec_test_ranges as plain offset ranges) into an xarray-backed
 * destination, reinitialising the iterator at each range's offset, then
 * rebuild the expected image and compare.  Setup of bufsize, i and patt
 * is elided from this listing.
 */
404 static void __init iov_kunit_copy_to_xarray(struct kunit *test)
406 const struct kvec_test_range *pr;
407 struct iov_iter iter;
408 struct xarray *xarray;
409 struct page **spages, **bpages;
410 u8 *scratch, *buffer;
411 size_t bufsize, npages, size, copied;
415 npages = bufsize / PAGE_SIZE;
417 xarray = iov_kunit_create_xarray(test);
/* Source: patterned scratch buffer. */
419 scratch = iov_kunit_create_buffer(test, &spages, npages);
420 for (i = 0; i < bufsize; i++)
421 scratch[i] = pattern(i);
/* Destination: zeroed buffer whose pages are stored in the xarray. */
423 buffer = iov_kunit_create_buffer(test, &bpages, npages);
424 memset(buffer, 0, bufsize);
426 iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
/* Copy each range separately, starting at its offset in the xarray. */
429 for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
430 size = pr->to - pr->from;
431 KUNIT_ASSERT_LE(test, pr->to, bufsize);
433 iov_iter_xarray(&iter, READ, xarray, pr->from, size);
434 copied = copy_to_iter(scratch + i, size, &iter);
436 KUNIT_EXPECT_EQ(test, copied, size);
437 KUNIT_EXPECT_EQ(test, iter.count, 0);
438 KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
442 /* Build the expected image in the scratch buffer. */
444 memset(scratch, 0, bufsize);
445 for (pr = kvec_test_ranges; pr->from >= 0; pr++)
446 for (i = pr->from; i < pr->to; i++)
447 scratch[i] = pattern(patt++);
449 /* Compare the images */
450 for (i = 0; i < bufsize; i++) {
451 KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
/* Presumably breaks on first mismatch (elided). */
452 if (buffer[i] != scratch[i])
460 * Test copying from an ITER_XARRAY-type iterator.
/*
 * copy_from_iter() out of an ITER_XARRAY: pattern-fill an xarray-backed
 * source buffer, copy each range out into scratch (reinitialising the
 * iterator at each range's offset with WRITE direction), then rebuild the
 * expected packed image and compare.  Setup of i/j is elided.
 * NOTE(review): the initial load uses READ but each range re-inits with
 * WRITE, so the load's direction appears not to matter here — confirm.
 */
462 static void __init iov_kunit_copy_from_xarray(struct kunit *test)
464 const struct kvec_test_range *pr;
465 struct iov_iter iter;
466 struct xarray *xarray;
467 struct page **spages, **bpages;
468 u8 *scratch, *buffer;
469 size_t bufsize, npages, size, copied;
473 npages = bufsize / PAGE_SIZE;
475 xarray = iov_kunit_create_xarray(test);
/* Source: patterned buffer whose pages are stored in the xarray. */
477 buffer = iov_kunit_create_buffer(test, &bpages, npages);
478 for (i = 0; i < bufsize; i++)
479 buffer[i] = pattern(i);
/* Destination: zeroed scratch buffer. */
481 scratch = iov_kunit_create_buffer(test, &spages, npages);
482 memset(scratch, 0, bufsize);
484 iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
/* Copy each range separately, starting at its offset in the xarray. */
487 for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
488 size = pr->to - pr->from;
489 KUNIT_ASSERT_LE(test, pr->to, bufsize);
491 iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
492 copied = copy_from_iter(scratch + i, size, &iter);
494 KUNIT_EXPECT_EQ(test, copied, size);
495 KUNIT_EXPECT_EQ(test, iter.count, 0);
496 KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
500 /* Build the expected image in the main buffer. */
502 memset(buffer, 0, bufsize);
/* Ranges are packed contiguously into buffer via the running index i. */
503 for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
504 for (j = pr->from; j < pr->to; j++) {
505 buffer[i++] = pattern(j);
512 /* Compare the images */
513 for (i = 0; i < bufsize; i++) {
514 KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
/* Presumably breaks on first mismatch (elided). */
515 if (scratch[i] != buffer[i])
523 * Test the extraction of ITER_KVEC-type iterators.
/*
 * iov_iter_extract_pages() on an ITER_KVEC: repeatedly extract up to 8
 * pages at a time from a kvec-mapped iterator and check that each
 * extracted page pointer and the starting offset match what the range
 * table predicts.  Several lines (declarations of kvec, buffer, from,
 * len, size initialisation, inner bookkeeping) are elided from this
 * listing; comments are hedged accordingly.
 */
525 static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
527 const struct kvec_test_range *pr;
528 struct iov_iter iter;
529 struct page **bpages, *pagelist[8], **pages = pagelist;
533 size_t bufsize, size = 0, npages;
537 npages = bufsize / PAGE_SIZE;
539 buffer = iov_kunit_create_buffer(test, &bpages, npages);
541 iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
542 buffer, bufsize, kvec_test_ranges);
545 pr = kvec_test_ranges;
/* Extract in batches until the iterator is exhausted. */
548 size_t offset0 = LONG_MAX;
/* Poison the page list so untouched slots are detectable. */
550 for (i = 0; i < ARRAY_SIZE(pagelist); i++)
551 pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
553 len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
554 ARRAY_SIZE(pagelist), 0, &offset0);
555 KUNIT_EXPECT_GE(test, len, 0);
558 KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
559 KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
560 KUNIT_EXPECT_LE(test, len, size);
561 KUNIT_EXPECT_EQ(test, iter.count, size - len);
/* Walk the extracted pages against the expected range positions. */
567 for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
569 ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
572 KUNIT_ASSERT_GE(test, part, 0);
/* Advance to the next range when the current one is consumed. */
573 while (from == pr->to) {
579 ix = from / PAGE_SIZE;
580 KUNIT_ASSERT_LT(test, ix, npages);
582 KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
583 KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
586 KUNIT_ASSERT_GE(test, len, 0);
/* Bail out early rather than cascade failures. */
592 if (test->status == KUNIT_FAILURE)
594 } while (iov_iter_count(&iter) > 0);
/* Everything must have been extracted and accounted for. */
597 KUNIT_EXPECT_EQ(test, size, 0);
598 KUNIT_EXPECT_EQ(test, iter.count, 0);
603 * Test the extraction of ITER_BVEC-type iterators.
/*
 * iov_iter_extract_pages() on an ITER_BVEC: same shape as the kvec
 * variant, but expected page indices come from pr->page plus the
 * within-range page offset.  Several lines (from, len, ix, size
 * initialisation, inner bookkeeping) are elided from this listing.
 */
605 static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
607 const struct bvec_test_range *pr;
608 struct iov_iter iter;
609 struct page **bpages, *pagelist[8], **pages = pagelist;
610 struct bio_vec bvec[8];
612 size_t bufsize, size = 0, npages;
616 npages = bufsize / PAGE_SIZE;
/* Buffer contents are irrelevant here; only the pages matter. */
618 iov_kunit_create_buffer(test, &bpages, npages);
619 iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
620 bpages, npages, bufsize, bvec_test_ranges);
623 pr = bvec_test_ranges;
/* Extract in batches until the iterator is exhausted. */
626 size_t offset0 = LONG_MAX;
/* Poison the page list so untouched slots are detectable. */
628 for (i = 0; i < ARRAY_SIZE(pagelist); i++)
629 pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
631 len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
632 ARRAY_SIZE(pagelist), 0, &offset0);
633 KUNIT_EXPECT_GE(test, len, 0);
636 KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
637 KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
638 KUNIT_EXPECT_LE(test, len, size);
639 KUNIT_EXPECT_EQ(test, iter.count, size - len);
/* Walk the extracted pages against the expected range positions. */
645 for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
647 ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
650 KUNIT_ASSERT_GE(test, part, 0);
/* Advance to the next range when the current one is consumed. */
651 while (from == pr->to) {
/* Expected page index: base page of the range plus progress. */
657 ix = pr->page + from / PAGE_SIZE;
658 KUNIT_ASSERT_LT(test, ix, npages);
660 KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
661 KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
664 KUNIT_ASSERT_GE(test, len, 0);
/* Bail out early rather than cascade failures. */
670 if (test->status == KUNIT_FAILURE)
672 } while (iov_iter_count(&iter) > 0);
/* Everything must have been extracted and accounted for. */
675 KUNIT_EXPECT_EQ(test, size, 0);
676 KUNIT_EXPECT_EQ(test, iter.count, 0);
681 * Test the extraction of ITER_XARRAY-type iterators.
/*
 * iov_iter_extract_pages() on an ITER_XARRAY: for each test range, point
 * the iterator at that offset/length within the xarray and extract pages
 * in batches, checking pointers and offsets against expectations.
 * Several lines (from, len, ix declarations, do-loop opening, inner
 * bookkeeping) are elided from this listing.
 */
683 static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
685 const struct kvec_test_range *pr;
686 struct iov_iter iter;
687 struct xarray *xarray;
688 struct page **bpages, *pagelist[8], **pages = pagelist;
690 size_t bufsize, size = 0, npages;
694 npages = bufsize / PAGE_SIZE;
696 xarray = iov_kunit_create_xarray(test);
/* Buffer contents are irrelevant here; only the pages matter. */
698 iov_kunit_create_buffer(test, &bpages, npages);
699 iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
701 for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
703 size = pr->to - from;
704 KUNIT_ASSERT_LE(test, pr->to, bufsize);
/* Re-point the iterator at this range within the xarray. */
706 iov_iter_xarray(&iter, WRITE, xarray, from, size);
709 size_t offset0 = LONG_MAX;
/* Poison the page list so untouched slots are detectable. */
711 for (i = 0; i < ARRAY_SIZE(pagelist); i++)
712 pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
714 len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
715 ARRAY_SIZE(pagelist), 0, &offset0);
716 KUNIT_EXPECT_GE(test, len, 0);
719 KUNIT_EXPECT_LE(test, len, size);
720 KUNIT_EXPECT_EQ(test, iter.count, size - len);
724 KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
725 KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
/* Walk the extracted pages against the expected positions. */
727 for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
729 ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
732 KUNIT_ASSERT_GE(test, part, 0);
733 ix = from / PAGE_SIZE;
734 KUNIT_ASSERT_LT(test, ix, npages);
736 KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
737 KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
740 KUNIT_ASSERT_GE(test, len, 0);
/* Bail out early rather than cascade failures. */
746 if (test->status == KUNIT_FAILURE)
748 } while (iov_iter_count(&iter) > 0);
/* The whole range must have been consumed and accounted for. */
750 KUNIT_EXPECT_EQ(test, size, 0);
751 KUNIT_EXPECT_EQ(test, iter.count, 0);
752 KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
/*
 * Test-case table for the iov_iter KUnit suite.  __refdata suppresses
 * section-mismatch warnings for referencing the __init test functions.
 * Presumably terminated by an empty entry (elided from this listing).
 */
759 static struct kunit_case __refdata iov_kunit_cases[] = {
760 KUNIT_CASE(iov_kunit_copy_to_kvec),
761 KUNIT_CASE(iov_kunit_copy_from_kvec),
762 KUNIT_CASE(iov_kunit_copy_to_bvec),
763 KUNIT_CASE(iov_kunit_copy_from_bvec),
764 KUNIT_CASE(iov_kunit_copy_to_xarray),
765 KUNIT_CASE(iov_kunit_copy_from_xarray),
766 KUNIT_CASE(iov_kunit_extract_pages_kvec),
767 KUNIT_CASE(iov_kunit_extract_pages_bvec),
768 KUNIT_CASE(iov_kunit_extract_pages_xarray),
/* Suite definition; the .name initialiser is elided from this listing. */
772 static struct kunit_suite iov_kunit_suite = {
774 .test_cases = iov_kunit_cases,
/* Register the suite with the KUnit framework. */
777 kunit_test_suites(&iov_kunit_suite);