1 #include <linux/kernel.h>
3 #include <linux/slab.h>
4 #include <linux/uaccess.h>
5 #include <linux/ktime.h>
6 #include <linux/debugfs.h>
7 #include <linux/highmem.h>
/*
 * put_back_pages() - release the pages taken by one gup_test run.
 *
 * The release primitive must match the acquisition primitive: pages taken
 * with pin_user_pages*() (FOLL_PIN) are released via unpin_user_pages(),
 * while plain get_user_pages*() references are dropped per page.  For
 * DUMP_USER_PAGES_TEST, @gup_test_flags records (via
 * GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) which primitive was used.
 *
 * NOTE(review): this extract is missing lines (the switch header,
 * put_page() calls, break statements, braces) — comments describe only
 * the code that is visible here.
 */
10 static void put_back_pages(unsigned int cmd, struct page **pages,
11 unsigned long nr_pages, unsigned int gup_test_flags)
16 case GUP_FAST_BENCHMARK:
/* Plain GUP reference: presumably put_page() per entry — missing line. */
18 for (i = 0; i < nr_pages; i++)
22 case PIN_FAST_BENCHMARK:
24 case PIN_LONGTERM_BENCHMARK:
/* FOLL_PIN references must be released with unpin, not put_page(). */
25 unpin_user_pages(pages, nr_pages);
27 case DUMP_USER_PAGES_TEST:
/* Dump test releases with whichever primitive acquired the pages. */
28 if (gup_test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) {
29 unpin_user_pages(pages, nr_pages);
31 for (i = 0; i < nr_pages; i++)
/*
 * verify_dma_pinned() - sanity-check that pinned pages really look pinned.
 *
 * For the PIN_* commands, walk @pages and WARN (with a dump_page() of the
 * offending folio) if folio_maybe_dma_pinned() does not report the folio as
 * dma-pinned.  For PIN_LONGTERM_BENCHMARK, additionally WARN if a pinned
 * folio is not long-term pinnable.  Plain-GUP commands are presumably
 * skipped by the (missing) switch header — they take no FOLL_PIN refs.
 *
 * NOTE(review): extract is missing lines (switch header, closing braces).
 */
39 static void verify_dma_pinned(unsigned int cmd, struct page **pages,
40 unsigned long nr_pages)
46 case PIN_FAST_BENCHMARK:
48 case PIN_LONGTERM_BENCHMARK:
49 for (i = 0; i < nr_pages; i++) {
50 folio = page_folio(pages[i]);
/* A just-pinned page must register as maybe-dma-pinned. */
52 if (WARN(!folio_maybe_dma_pinned(folio),
53 "pages[%lu] is NOT dma-pinned\n", i)) {
55 dump_page(&folio->page, "gup_test failure");
/* Long-term pins must also come from long-term-pinnable memory. */
57 } else if (cmd == PIN_LONGTERM_BENCHMARK &&
58 WARN(!folio_is_longterm_pinnable(folio),
59 "pages[%lu] is NOT pinnable but pinned\n",
61 dump_page(&folio->page, "gup_test failure");
/*
 * dump_pages_test() - dump_page() the pages userspace asked to inspect.
 *
 * gup->which_pages[] is a 1-based list of page indices (0 == unused slot,
 * per the decrement below).  First pass: zero out any index beyond
 * @nr_pages so it is ignored.  Second pass: convert each remaining index
 * to 0-based and dump the corresponding page.
 *
 * NOTE(review): extract is missing lines (loop braces and the check that
 * skips zero entries before the decrement).
 */
69 static void dump_pages_test(struct gup_test *gup, struct page **pages,
70 unsigned long nr_pages)
72 unsigned int index_to_dump;
76 * Zero out any user-supplied page index that is out of range. Remember:
77 * .which_pages[] contains a 1-based set of page indices.
79 for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) {
80 if (gup->which_pages[i] > nr_pages) {
81 pr_warn("ZEROING due to out of range: .which_pages[%u]: %u\n",
82 i, gup->which_pages[i]);
83 gup->which_pages[i] = 0;
87 for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) {
88 index_to_dump = gup->which_pages[i];
91 index_to_dump--; // Decode from 1-based, to 0-based
92 pr_info("---- page #%u, starting from user virt addr: 0x%llx\n",
93 index_to_dump, gup->addr);
94 dump_page(pages[index_to_dump],
95 "gup_test: dump_pages() test");
/*
 * __gup_test_ioctl() - run one benchmark/test pass for @cmd.
 *
 * Allocates a page-pointer array sized for gup->size bytes, then walks the
 * user VA range [gup->addr, gup->addr + gup->size) in chunks of
 * gup->nr_pages_per_call pages, acquiring each chunk with the GUP/pin
 * variant selected by @cmd.  The acquisition loop is wall-clock timed with
 * ktime and reported back in gup->get_delta_usec; the release pass
 * (put_back_pages()) is timed separately into gup->put_delta_usec.
 * gup->size is rewritten to the number of bytes actually covered.
 *
 * The _fast variants take no mmap lock; all others run under
 * mmap_read_lock (killable, so a fatal signal aborts before the lock).
 *
 * NOTE(review): extract is missing lines (returns, error paths, the
 * switch header, the per-call "nr" re-clamp, and the final cleanup),
 * so the comments below describe only the visible statements.
 */
100 static int __gup_test_ioctl(unsigned int cmd,
101 struct gup_test *gup)
103 ktime_t start_time, end_time;
104 unsigned long i, nr_pages, addr, next;
/* Only the _fast GUP/PIN variants may run without the mmap read lock. */
108 bool needs_mmap_lock =
109 cmd != GUP_FAST_BENCHMARK && cmd != PIN_FAST_BENCHMARK;
/* Reject sizes that would overflow the unsigned long arithmetic below. */
111 if (gup->size > ULONG_MAX)
114 nr_pages = gup->size / PAGE_SIZE;
115 pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
119 if (needs_mmap_lock && mmap_read_lock_killable(current->mm)) {
125 nr = gup->nr_pages_per_call;
126 start_time = ktime_get();
/* Walk the VA range one nr_pages_per_call-sized chunk at a time. */
127 for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {
128 if (nr != gup->nr_pages_per_call)
131 next = addr + nr * PAGE_SIZE;
/* Clamp the final chunk so it does not run past the requested range. */
132 if (next > gup->addr + gup->size) {
133 next = gup->addr + gup->size;
134 nr = (next - addr) / PAGE_SIZE;
138 case GUP_FAST_BENCHMARK:
139 nr = get_user_pages_fast(addr, nr, gup->gup_flags,
143 nr = get_user_pages(addr, nr, gup->gup_flags, pages + i);
145 case PIN_FAST_BENCHMARK:
146 nr = pin_user_pages_fast(addr, nr, gup->gup_flags,
150 nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i);
152 case PIN_LONGTERM_BENCHMARK:
153 nr = pin_user_pages(addr, nr,
154 gup->gup_flags | FOLL_LONGTERM,
157 case DUMP_USER_PAGES_TEST:
158 if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN)
159 nr = pin_user_pages(addr, nr, gup->gup_flags,
162 nr = get_user_pages(addr, nr, gup->gup_flags,
174 end_time = ktime_get();
176 /* Shifting the meaning of nr_pages: now it is actual number pinned: */
/* Report acquisition time and the bytes actually covered to userspace. */
179 gup->get_delta_usec = ktime_us_delta(end_time, start_time);
180 gup->size = addr - gup->addr;
183 * Take an un-benchmark-timed moment to verify DMA pinned
184 * state: print a warning if any non-dma-pinned pages are found:
186 verify_dma_pinned(cmd, pages, nr_pages);
188 if (cmd == DUMP_USER_PAGES_TEST)
189 dump_pages_test(gup, pages, nr_pages);
/* Time the release pass separately from the acquisition pass. */
191 start_time = ktime_get();
193 put_back_pages(cmd, pages, nr_pages, gup->test_flags);
195 end_time = ktime_get();
196 gup->put_delta_usec = ktime_us_delta(end_time, start_time);
200 mmap_read_unlock(current->mm);
/* Serializes all access to the PIN_LONGTERM_TEST_* state below. */
206 static DEFINE_MUTEX(pin_longterm_test_mutex);
/* Pages held by a PIN_LONGTERM_TEST_START; NULL when no test is active. */
207 static struct page **pin_longterm_test_pages;
/* Number of entries in pin_longterm_test_pages that are actually pinned. */
208 static unsigned long pin_longterm_test_nr_pages;
/*
 * pin_longterm_test_stop() - tear down any active long-term-pin test.
 *
 * Unpins whatever pages were pinned, frees the page array, and resets the
 * globals so a new test can start.  Safe to call when no test is active
 * (the NULL check makes it idempotent).  Caller is expected to hold
 * pin_longterm_test_mutex — see pin_longterm_test_ioctl().
 */
210 static inline void pin_longterm_test_stop(void)
212 if (pin_longterm_test_pages) {
213 if (pin_longterm_test_nr_pages)
214 unpin_user_pages(pin_longterm_test_pages,
215 pin_longterm_test_nr_pages);
216 kvfree(pin_longterm_test_pages);
217 pin_longterm_test_pages = NULL;
218 pin_longterm_test_nr_pages = 0;
/*
 * pin_longterm_test_start() - begin holding long-term pins on a user range.
 *
 * Copies a struct pin_longterm_test from userspace (@arg), validates the
 * flags and page alignment, allocates a page array, and pins the whole
 * range with FOLL_LONGTERM — using pin_user_pages_fast() when
 * PIN_LONGTERM_TEST_FLAG_USE_FAST is set, otherwise pin_user_pages()
 * under the mmap read lock.  Pins are accumulated in chunks until the
 * whole range is covered; on a pin failure the partial state is torn
 * down via pin_longterm_test_stop().  Fails if a test is already active.
 *
 * NOTE(review): extract is missing lines (error-code returns, the
 * nr_pages bounds check, loop braces, and the cur_pages <= 0 test that
 * presumably guards the _stop() call) — hedged accordingly.
 */
222 static inline int pin_longterm_test_start(unsigned long arg)
224 long nr_pages, cur_pages, addr, remaining_pages;
225 int gup_flags = FOLL_LONGTERM;
226 struct pin_longterm_test args;
/* Only one long-term-pin test may be active at a time. */
231 if (pin_longterm_test_pages)
234 if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
/* Reject any flag bits we do not understand. */
238 ~(PIN_LONGTERM_TEST_FLAG_USE_WRITE|PIN_LONGTERM_TEST_FLAG_USE_FAST))
240 if (!IS_ALIGNED(args.addr | args.size, PAGE_SIZE))
242 if (args.size > LONG_MAX)
244 nr_pages = args.size / PAGE_SIZE;
248 pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
252 if (args.flags & PIN_LONGTERM_TEST_FLAG_USE_WRITE)
253 gup_flags |= FOLL_WRITE;
254 fast = !!(args.flags & PIN_LONGTERM_TEST_FLAG_USE_FAST);
/* The non-fast path needs the mmap read lock; killable so signals win. */
256 if (!fast && mmap_read_lock_killable(current->mm)) {
261 pin_longterm_test_pages = pages;
262 pin_longterm_test_nr_pages = 0;
/* Pin in chunks until the requested page count is fully covered. */
264 while (nr_pages - pin_longterm_test_nr_pages) {
265 remaining_pages = nr_pages - pin_longterm_test_nr_pages;
266 addr = args.addr + pin_longterm_test_nr_pages * PAGE_SIZE;
269 cur_pages = pin_user_pages_fast(addr, remaining_pages,
272 cur_pages = pin_user_pages(addr, remaining_pages,
/* On failure, release whatever was pinned so far. */
275 pin_longterm_test_stop();
279 pin_longterm_test_nr_pages += cur_pages;
284 mmap_read_unlock(current->mm);
/*
 * pin_longterm_test_read() - copy the pinned pages' contents to userspace.
 *
 * @arg points at a user address (read via copy_from_user) that receives
 * one PAGE_SIZE chunk per pinned page, in order.  Each page is mapped
 * with kmap_local_page() for the copy.  Fails if no test is active.
 *
 * NOTE(review): extract is missing lines (error returns, the
 * kunmap_local() that presumably pairs with kmap_local_page(), and the
 * handling of a short copy_to_user) — hedged accordingly.
 */
288 static inline int pin_longterm_test_read(unsigned long arg)
293 if (!pin_longterm_test_pages)
296 if (copy_from_user(&user_addr, (void __user *)arg, sizeof(user_addr)))
299 for (i = 0; i < pin_longterm_test_nr_pages; i++) {
300 void *addr = kmap_local_page(pin_longterm_test_pages[i]);
303 ret = copy_to_user((void __user *)(unsigned long)user_addr, addr,
/* Advance the destination by one page per pinned page. */
308 user_addr += PAGE_SIZE;
/*
 * pin_longterm_test_ioctl() - dispatch the PIN_LONGTERM_TEST_* commands.
 *
 * Takes pin_longterm_test_mutex (killable) around every sub-command, so
 * the start/stop/read helpers above never race with each other.
 */
313 static long pin_longterm_test_ioctl(struct file *filep, unsigned int cmd,
318 if (mutex_lock_killable(&pin_longterm_test_mutex))
322 case PIN_LONGTERM_TEST_START:
323 ret = pin_longterm_test_start(arg);
325 case PIN_LONGTERM_TEST_STOP:
326 pin_longterm_test_stop();
329 case PIN_LONGTERM_TEST_READ:
330 ret = pin_longterm_test_read(arg);
334 mutex_unlock(&pin_longterm_test_mutex);
/*
 * gup_test_ioctl() - top-level ioctl entry for the gup_test debugfs file.
 *
 * The PIN_LONGTERM_TEST_* commands are forwarded wholesale to
 * pin_longterm_test_ioctl() (they use a different argument struct).  The
 * benchmark/dump commands copy a struct gup_test in from userspace, run
 * __gup_test_ioctl(), and copy the (timing-filled) struct back out.
 */
338 static long gup_test_ioctl(struct file *filep, unsigned int cmd,
345 case GUP_FAST_BENCHMARK:
346 case PIN_FAST_BENCHMARK:
347 case PIN_LONGTERM_BENCHMARK:
350 case DUMP_USER_PAGES_TEST:
/* Long-term-pin test commands have their own handler and locking. */
352 case PIN_LONGTERM_TEST_START:
353 case PIN_LONGTERM_TEST_STOP:
354 case PIN_LONGTERM_TEST_READ:
355 return pin_longterm_test_ioctl(filep, cmd, arg);
360 if (copy_from_user(&gup, (void __user *)arg, sizeof(gup)))
363 ret = __gup_test_ioctl(cmd, &gup);
/* Return the measured timings (and adjusted size) to userspace. */
367 if (copy_to_user((void __user *)arg, &gup, sizeof(gup)))
/*
 * gup_test_release() - file release hook: make sure a long-term-pin test
 * does not outlive the file descriptor that started it.
 */
373 static int gup_test_release(struct inode *inode, struct file *file)
375 pin_longterm_test_stop();
/* File operations for the debugfs "gup_test" node; all work is ioctl-driven. */
380 static const struct file_operations gup_test_fops = {
381 .open = nonseekable_open,
382 .unlocked_ioctl = gup_test_ioctl,
/* compat_ptr_ioctl: 32-bit userspace passes a pointer arg transparently. */
383 .compat_ioctl = compat_ptr_ioctl,
384 .release = gup_test_release,
/*
 * gup_test_init() - create the debugfs control file (root dir, mode 0600).
 * The _unsafe variant is used because the fops never touch file->private
 * after removal; no private data pointer is needed (NULL).
 */
387 static int __init gup_test_init(void)
389 debugfs_create_file_unsafe("gup_test", 0600, NULL, NULL,
/* late_initcall: debugfs must already be registered by this point. */
395 late_initcall(gup_test_init);