1 // SPDX-License-Identifier: GPL-2.0
3 * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
4 * the linux kernel to help device drivers mirror a process address space in
5 * the device. This allows the device to use the same address space which
6 * makes communication and data exchange a lot easier.
8 * This framework's sole purpose is to exercise various code paths inside
9 * the kernel to make sure that HMM performs as expected and to flush out any
13 #include "../kselftest_harness.h"
24 #include <sys/types.h>
27 #include <sys/ioctl.h>
29 #include "./local_config.h"
30 #ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
31 #include <hugetlbfs.h>
35 * This is a private UAPI to the kernel test module so it isn't exported
36 * in the usual include/uapi/... directory.
38 #include "../../../../lib/test_hmm_uapi.h"
49 #define TWOMEG (1 << 21)
50 #define HMM_BUFFER_SIZE (1024 << 12)
51 #define HMM_PATH_MAX 64
/*
 * Round x up to the next multiple of a.  a must be a power of two.
 * Fix: the first use of the macro argument was unparenthesized
 * ((x) + (a - 1)); an argument such as "b ? 4 : 8" would expand with
 * the wrong precedence.  Both uses of 'a' are now fully parenthesized.
 */
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
59 unsigned int page_size;
60 unsigned int page_shift;
67 unsigned int page_size;
68 unsigned int page_shift;
/*
 * Open one instance of the HMM mirror test driver,
 * /dev/hmm_dmirror<unit>, read-write.  On failure an error is logged
 * to stderr.  NOTE(review): this extract is missing lines (the fd
 * declaration, the error/return paths and the closing brace).
 */
71 static int hmm_open(int unit)
73 char pathname[HMM_PATH_MAX];
76 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
77 fd = open(pathname, O_RDWR, 0);
79 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
/*
 * Per-test setup for the single-device "hmm" fixture: cache the system
 * page size/shift and open mirror device 0.  NOTE(review): the
 * FIXTURE_SETUP(hmm) header line is missing from this extract.
 */
86 self->page_size = sysconf(_SC_PAGE_SIZE);
/* ffs() on a power-of-two page size yields log2(page_size) + 1. */
87 self->page_shift = ffs(self->page_size) - 1;
89 self->fd = hmm_open(0);
90 ASSERT_GE(self->fd, 0);
/*
 * Per-test setup for the two-device "hmm2" fixture: cache page
 * size/shift and open mirror devices 0 and 1.  NOTE(review): the
 * FIXTURE_SETUP(hmm2) header line is missing from this extract.
 */
95 self->page_size = sysconf(_SC_PAGE_SIZE);
96 self->page_shift = ffs(self->page_size) - 1;
98 self->fd0 = hmm_open(0);
99 ASSERT_GE(self->fd0, 0);
100 self->fd1 = hmm_open(1);
101 ASSERT_GE(self->fd1, 0);
/* Close the mirror device opened in FIXTURE_SETUP(hmm). */
104 FIXTURE_TEARDOWN(hmm)
106 int ret = close(self->fd);
/* Close both mirror devices opened in FIXTURE_SETUP(hmm2). */
112 FIXTURE_TEARDOWN(hmm2)
114 int ret = close(self->fd0);
119 ret = close(self->fd1);
/*
 * Issue one dmirror ioctl (read/write/migrate/snapshot/...) on behalf
 * of the simulated device.  buffer->ptr is the target process address
 * range, buffer->mirror receives/supplies the device-side copy, and on
 * success the pages-touched and fault counts reported by the driver
 * are copied back into the buffer.  NOTE(review): lines are missing
 * from this extract (cmd.npages setup, the EINTR retry loop and error
 * return).
 */
124 static int hmm_dmirror_cmd(int fd,
125 unsigned long request,
126 struct hmm_buffer *buffer,
127 unsigned long npages)
129 struct hmm_dmirror_cmd cmd;
132 /* Simulate a device reading system memory. */
133 cmd.addr = (__u64)buffer->ptr;
134 cmd.ptr = (__u64)buffer->mirror;
138 ret = ioctl(fd, request, &cmd);
145 buffer->cpages = cmd.cpages;
146 buffer->faults = cmd.faults;
/*
 * Release a test buffer: unmap the process range and free the
 * malloc'ed device mirror.  NOTE(review): the NULL-check and the
 * free(buffer) of the struct itself are missing from this extract.
 */
151 static void hmm_buffer_free(struct hmm_buffer *buffer)
157 munmap(buffer->ptr, buffer->size);
158 free(buffer->mirror);
/*
 * Create an unnamed temporary file of the given size under /tmp using
 * O_TMPFILE (so it disappears on close), retrying ftruncate() on
 * EINTR.  Returns the file descriptor; error paths are missing from
 * this extract.
 */
163 * Create a temporary file that will be deleted on close.
165 static int hmm_create_file(unsigned long size)
167 char path[HMM_PATH_MAX];
170 strcpy(path, "/tmp");
171 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
176 r = ftruncate(fd, size);
177 } while (r == -1 && errno == EINTR);
/*
 * Return a random unsigned int read from /dev/urandom.
 * NOTE(review): the read() return value is not checked here, so on a
 * short read the result may be partly uninitialized — acceptable for a
 * test-only jitter source, but worth confirming upstream.
 */
186 * Return a random unsigned number.
188 static unsigned int hmm_random(void)
194 fd = open("/dev/urandom", O_RDONLY);
196 fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
201 read(fd, &r, sizeof(r));
/*
 * Sleep for roughly n microseconds-scale ticks (used to randomize
 * timing in teardown races).  Body is missing from this extract —
 * presumably a nanosleep() call; confirm against the full source.
 */
205 static void hmm_nanosleep(unsigned int n)
/*
 * Smoke test: the fixture's setup/teardown already open and close the
 * device, so an empty test body verifies open/close works.
 */
215 * Simple NULL test of device open/close.
217 TEST_F(hmm, open_close)
/*
 * Device read of private anonymous memory.  The first two pages are
 * deliberately left untouched (pte_none) / zero-page mapped so the
 * device read exercises those paths; the rest is initialized to an
 * index pattern and verified in the mirror after one device fault.
 */
222 * Read private anonymous memory.
224 TEST_F(hmm, anon_read)
226 struct hmm_buffer *buffer;
227 unsigned long npages;
234 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
235 ASSERT_NE(npages, 0);
236 size = npages << self->page_shift;
238 buffer = malloc(sizeof(*buffer));
239 ASSERT_NE(buffer, NULL);
243 buffer->mirror = malloc(size);
244 ASSERT_NE(buffer->mirror, NULL);
246 buffer->ptr = mmap(NULL, size,
247 PROT_READ | PROT_WRITE,
248 MAP_PRIVATE | MAP_ANONYMOUS,
250 ASSERT_NE(buffer->ptr, MAP_FAILED);
253 * Initialize buffer in system memory but leave the first two pages
254 * zero (pte_none and pfn_zero).
256 i = 2 * self->page_size / sizeof(*ptr);
257 for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
260 /* Set buffer permission to read-only. */
261 ret = mprotect(buffer->ptr, size, PROT_READ);
264 /* Populate the CPU page table with a special zero page. */
265 val = *(int *)(buffer->ptr + self->page_size);
268 /* Simulate a device reading system memory. */
269 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
/* One ioctl should resolve all pages with a single device fault. */
271 ASSERT_EQ(buffer->cpages, npages);
272 ASSERT_EQ(buffer->faults, 1);
274 /* Check what the device read. */
275 ptr = buffer->mirror;
276 for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
277 ASSERT_EQ(ptr[i], 0);
278 for (; i < size / sizeof(*ptr); ++i)
279 ASSERT_EQ(ptr[i], i);
281 hmm_buffer_free(buffer);
/*
 * Device read of PROT_NONE memory must fail with -EFAULT and must not
 * modify the device mirror (pre-seeded with -i to detect writes).
 */
285 * Read private anonymous memory which has been protected with
286 * mprotect() PROT_NONE.
288 TEST_F(hmm, anon_read_prot)
290 struct hmm_buffer *buffer;
291 unsigned long npages;
297 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
298 ASSERT_NE(npages, 0);
299 size = npages << self->page_shift;
301 buffer = malloc(sizeof(*buffer));
302 ASSERT_NE(buffer, NULL);
306 buffer->mirror = malloc(size);
307 ASSERT_NE(buffer->mirror, NULL);
309 buffer->ptr = mmap(NULL, size,
310 PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS,
313 ASSERT_NE(buffer->ptr, MAP_FAILED);
315 /* Initialize buffer in system memory. */
316 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
319 /* Initialize mirror buffer so we can verify it isn't written. */
320 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
323 /* Protect buffer from reading. */
324 ret = mprotect(buffer->ptr, size, PROT_NONE);
327 /* Simulate a device reading system memory. */
328 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
329 ASSERT_EQ(ret, -EFAULT);
331 /* Allow CPU to read the buffer so we can check it. */
332 ret = mprotect(buffer->ptr, size, PROT_READ);
334 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
335 ASSERT_EQ(ptr[i], i);
337 /* Check what the device read. */
/* Mirror must still hold the -i sentinel: the failed read wrote nothing. */
338 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
339 ASSERT_EQ(ptr[i], -i);
341 hmm_buffer_free(buffer);
/*
 * Device write to private anonymous memory: seed the mirror with an
 * index pattern, have the device write it through, then verify the
 * process-visible buffer matches after a single device fault.
 */
345 * Write private anonymous memory.
347 TEST_F(hmm, anon_write)
349 struct hmm_buffer *buffer;
350 unsigned long npages;
356 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
357 ASSERT_NE(npages, 0);
358 size = npages << self->page_shift;
360 buffer = malloc(sizeof(*buffer));
361 ASSERT_NE(buffer, NULL);
365 buffer->mirror = malloc(size);
366 ASSERT_NE(buffer->mirror, NULL);
368 buffer->ptr = mmap(NULL, size,
369 PROT_READ | PROT_WRITE,
370 MAP_PRIVATE | MAP_ANONYMOUS,
372 ASSERT_NE(buffer->ptr, MAP_FAILED);
374 /* Initialize data that the device will write to buffer->ptr. */
375 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
378 /* Simulate a device writing system memory. */
379 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
381 ASSERT_EQ(buffer->cpages, npages);
382 ASSERT_EQ(buffer->faults, 1);
384 /* Check what the device wrote. */
385 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
386 ASSERT_EQ(ptr[i], i);
388 hmm_buffer_free(buffer);
/*
 * Device write to read-only memory must fail with -EPERM and leave the
 * zero page intact; after mprotect() grants write access, the same
 * device write must succeed and replace the zero page.
 */
392 * Write private anonymous memory which has been protected with
393 * mprotect() PROT_READ.
395 TEST_F(hmm, anon_write_prot)
397 struct hmm_buffer *buffer;
398 unsigned long npages;
404 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
405 ASSERT_NE(npages, 0);
406 size = npages << self->page_shift;
408 buffer = malloc(sizeof(*buffer));
409 ASSERT_NE(buffer, NULL);
413 buffer->mirror = malloc(size);
414 ASSERT_NE(buffer->mirror, NULL);
416 buffer->ptr = mmap(NULL, size,
418 MAP_PRIVATE | MAP_ANONYMOUS,
420 ASSERT_NE(buffer->ptr, MAP_FAILED);
422 /* Simulate a device reading a zero page of memory. */
423 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
425 ASSERT_EQ(buffer->cpages, 1);
426 ASSERT_EQ(buffer->faults, 1);
428 /* Initialize data that the device will write to buffer->ptr. */
429 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
432 /* Simulate a device writing system memory. */
434 ASSERT_EQ(ret, -EPERM);
433 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
436 /* Check what the device wrote. */
437 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
438 ASSERT_EQ(ptr[i], 0);
440 /* Now allow writing and see that the zero page is replaced. */
441 ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
444 /* Simulate a device writing system memory. */
445 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
447 ASSERT_EQ(buffer->cpages, npages);
448 ASSERT_EQ(buffer->faults, 1);
450 /* Check what the device wrote. */
451 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
452 ASSERT_EQ(ptr[i], i);
454 hmm_buffer_free(buffer);
/*
 * Fork a child that inherits a MAP_PRIVATE mapping; a device write in
 * the child must trigger copy-on-write, so the parent's copy stays
 * unchanged while the child sees the device-written values.
 * NOTE(review): the fork() call and the child/parent branch lines are
 * missing from this extract.
 */
458 * Check that a device writing an anonymous private mapping
459 * will copy-on-write if a child process inherits the mapping.
461 TEST_F(hmm, anon_write_child)
463 struct hmm_buffer *buffer;
464 unsigned long npages;
472 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
473 ASSERT_NE(npages, 0);
474 size = npages << self->page_shift;
476 buffer = malloc(sizeof(*buffer));
477 ASSERT_NE(buffer, NULL);
481 buffer->mirror = malloc(size);
482 ASSERT_NE(buffer->mirror, NULL);
484 buffer->ptr = mmap(NULL, size,
485 PROT_READ | PROT_WRITE,
486 MAP_PRIVATE | MAP_ANONYMOUS,
488 ASSERT_NE(buffer->ptr, MAP_FAILED);
490 /* Initialize buffer->ptr so we can tell if it is written. */
491 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
494 /* Initialize data that the device will write to buffer->ptr. */
495 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
/* Parent path: wait for the child and verify CoW isolated us. */
502 waitpid(pid, &ret, 0);
503 ASSERT_EQ(WIFEXITED(ret), 1);
505 /* Check that the parent's buffer did not change. */
506 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
507 ASSERT_EQ(ptr[i], i);
/* Child path from here down. */
511 /* Check that we see the parent's values. */
512 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
513 ASSERT_EQ(ptr[i], i);
514 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
515 ASSERT_EQ(ptr[i], -i);
517 /* The child process needs its own mirror to its own mm. */
518 child_fd = hmm_open(0);
519 ASSERT_GE(child_fd, 0);
521 /* Simulate a device writing system memory. */
522 ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
524 ASSERT_EQ(buffer->cpages, npages);
525 ASSERT_EQ(buffer->faults, 1);
527 /* Check what the device wrote. */
528 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
529 ASSERT_EQ(ptr[i], -i);
/*
 * Same as anon_write_child but with MAP_SHARED: the child's device
 * write must NOT copy-on-write, so the parent observes the
 * device-written (-i) values.  NOTE(review): the fork() call and
 * child/parent branch lines are missing from this extract.
 */
536 * Check that a device writing an anonymous shared mapping
537 * will not copy-on-write if a child process inherits the mapping.
539 TEST_F(hmm, anon_write_child_shared)
541 struct hmm_buffer *buffer;
542 unsigned long npages;
550 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
551 ASSERT_NE(npages, 0);
552 size = npages << self->page_shift;
554 buffer = malloc(sizeof(*buffer));
555 ASSERT_NE(buffer, NULL);
559 buffer->mirror = malloc(size);
560 ASSERT_NE(buffer->mirror, NULL);
562 buffer->ptr = mmap(NULL, size,
563 PROT_READ | PROT_WRITE,
564 MAP_SHARED | MAP_ANONYMOUS,
566 ASSERT_NE(buffer->ptr, MAP_FAILED);
568 /* Initialize buffer->ptr so we can tell if it is written. */
569 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
572 /* Initialize data that the device will write to buffer->ptr. */
573 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
/* Parent path: the shared mapping must reflect the child's writes. */
580 waitpid(pid, &ret, 0);
581 ASSERT_EQ(WIFEXITED(ret), 1);
583 /* Check that the parent's buffer did change. */
584 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
585 ASSERT_EQ(ptr[i], -i);
/* Child path from here down. */
589 /* Check that we see the parent's values. */
590 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
591 ASSERT_EQ(ptr[i], i);
592 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
593 ASSERT_EQ(ptr[i], -i);
595 /* The child process needs its own mirror to its own mm. */
596 child_fd = hmm_open(0);
597 ASSERT_GE(child_fd, 0);
599 /* Simulate a device writing system memory. */
600 ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
602 ASSERT_EQ(buffer->cpages, npages);
603 ASSERT_EQ(buffer->faults, 1);
605 /* Check what the device wrote. */
606 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
607 ASSERT_EQ(ptr[i], -i);
/*
 * Device write through a transparent huge page: over-allocate, align
 * buffer->ptr to the huge-page boundary, madvise(MADV_HUGEPAGE), then
 * verify a device write of the whole range.  old_ptr preserves the
 * original mmap address so hmm_buffer_free() unmaps the right region.
 */
614 * Write private anonymous huge page.
616 TEST_F(hmm, anon_write_huge)
618 struct hmm_buffer *buffer;
619 unsigned long npages;
629 buffer = malloc(sizeof(*buffer));
630 ASSERT_NE(buffer, NULL);
634 buffer->mirror = malloc(size);
635 ASSERT_NE(buffer->mirror, NULL);
637 buffer->ptr = mmap(NULL, size,
638 PROT_READ | PROT_WRITE,
639 MAP_PRIVATE | MAP_ANONYMOUS,
641 ASSERT_NE(buffer->ptr, MAP_FAILED);
644 npages = size >> self->page_shift;
645 map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
646 ret = madvise(map, size, MADV_HUGEPAGE);
648 old_ptr = buffer->ptr;
651 /* Initialize data that the device will write to buffer->ptr. */
652 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
655 /* Simulate a device writing system memory. */
656 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
658 ASSERT_EQ(buffer->cpages, npages);
659 ASSERT_EQ(buffer->faults, 1);
661 /* Check what the device wrote. */
662 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
663 ASSERT_EQ(ptr[i], i);
665 buffer->ptr = old_ptr;
666 hmm_buffer_free(buffer);
/*
 * Device write to a hugetlbfs-backed region (compiled only when
 * libhugetlbfs is available).  Skips gracefully when no huge page size
 * can be determined or a huge page cannot be allocated; the loop picks
 * the smallest reported huge page size.
 */
669 #ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
671 * Write huge TLBFS page.
673 TEST_F(hmm, anon_write_hugetlbfs)
675 struct hmm_buffer *buffer;
676 unsigned long npages;
684 /* Skip test if we can't allocate a hugetlbfs page. */
686 n = gethugepagesizes(pagesizes, 4);
688 SKIP(return, "Huge page size could not be determined");
/* Select the index of the smallest available huge page size. */
689 for (idx = 0; --n > 0; ) {
690 if (pagesizes[n] < pagesizes[idx])
693 size = ALIGN(TWOMEG, pagesizes[idx]);
694 npages = size >> self->page_shift;
696 buffer = malloc(sizeof(*buffer));
697 ASSERT_NE(buffer, NULL);
699 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
700 if (buffer->ptr == NULL) {
702 SKIP(return, "Huge page could not be allocated");
707 buffer->mirror = malloc(size);
708 ASSERT_NE(buffer->mirror, NULL);
710 /* Initialize data that the device will write to buffer->ptr. */
711 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
714 /* Simulate a device writing system memory. */
715 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
717 ASSERT_EQ(buffer->cpages, npages);
718 ASSERT_EQ(buffer->faults, 1);
720 /* Check what the device wrote. */
721 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
722 ASSERT_EQ(ptr[i], i);
/* Hugetlbfs region is freed via libhugetlbfs, not munmap. */
724 free_hugepage_region(buffer->ptr);
726 hmm_buffer_free(buffer);
728 #endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
/*
 * Device read of file-backed (page cache) memory: write an index
 * pattern to a temp file, mmap it, then verify the device reads the
 * pattern back through the mapping with one fault.
 */
731 * Read mmap'ed file memory.
733 TEST_F(hmm, file_read)
735 struct hmm_buffer *buffer;
736 unsigned long npages;
744 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
745 ASSERT_NE(npages, 0);
746 size = npages << self->page_shift;
748 fd = hmm_create_file(size);
751 buffer = malloc(sizeof(*buffer));
752 ASSERT_NE(buffer, NULL);
756 buffer->mirror = malloc(size);
757 ASSERT_NE(buffer->mirror, NULL);
759 /* Write initial contents of the file. */
760 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
762 len = pwrite(fd, buffer->mirror, size, 0);
763 ASSERT_EQ(len, size);
/* Clear the mirror so the later check proves the device read it. */
764 memset(buffer->mirror, 0, size);
766 buffer->ptr = mmap(NULL, size,
770 ASSERT_NE(buffer->ptr, MAP_FAILED);
772 /* Simulate a device reading system memory. */
773 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
775 ASSERT_EQ(buffer->cpages, npages);
776 ASSERT_EQ(buffer->faults, 1);
778 /* Check what the device read. */
779 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
780 ASSERT_EQ(ptr[i], i);
782 hmm_buffer_free(buffer);
/*
 * Device write to file-backed memory via a shared mapping: verify both
 * the mapping contents and (via pread) that the write reached the
 * file itself.
 */
786 * Write mmap'ed file memory.
788 TEST_F(hmm, file_write)
790 struct hmm_buffer *buffer;
791 unsigned long npages;
799 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
800 ASSERT_NE(npages, 0);
801 size = npages << self->page_shift;
803 fd = hmm_create_file(size);
806 buffer = malloc(sizeof(*buffer));
807 ASSERT_NE(buffer, NULL);
811 buffer->mirror = malloc(size);
812 ASSERT_NE(buffer->mirror, NULL);
814 buffer->ptr = mmap(NULL, size,
815 PROT_READ | PROT_WRITE,
818 ASSERT_NE(buffer->ptr, MAP_FAILED);
820 /* Initialize data that the device will write to buffer->ptr. */
821 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
824 /* Simulate a device writing system memory. */
825 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
827 ASSERT_EQ(buffer->cpages, npages);
828 ASSERT_EQ(buffer->faults, 1);
830 /* Check what the device wrote. */
831 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
832 ASSERT_EQ(ptr[i], i);
834 /* Check that the device also wrote the file. */
835 len = pread(fd, buffer->mirror, size, 0);
836 ASSERT_EQ(len, size);
837 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
838 ASSERT_EQ(ptr[i], i);
840 hmm_buffer_free(buffer);
/*
 * Migrate private anonymous memory into device private memory and
 * verify the migrated contents via the mirror.  NOTE(review): the
 * TEST_F(hmm, migrate) header line is missing from this extract —
 * confirm against the full source.
 */
844 * Migrate anonymous memory to device private memory.
848 struct hmm_buffer *buffer;
849 unsigned long npages;
855 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
856 ASSERT_NE(npages, 0);
857 size = npages << self->page_shift;
859 buffer = malloc(sizeof(*buffer));
860 ASSERT_NE(buffer, NULL);
864 buffer->mirror = malloc(size);
865 ASSERT_NE(buffer->mirror, NULL);
867 buffer->ptr = mmap(NULL, size,
868 PROT_READ | PROT_WRITE,
869 MAP_PRIVATE | MAP_ANONYMOUS,
871 ASSERT_NE(buffer->ptr, MAP_FAILED);
873 /* Initialize buffer in system memory. */
874 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
877 /* Migrate memory to device. */
878 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
880 ASSERT_EQ(buffer->cpages, npages);
882 /* Check what the device read. */
883 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
884 ASSERT_EQ(ptr[i], i);
886 hmm_buffer_free(buffer);
/*
 * Migrate to device memory, CPU-fault half of it back (by reading it),
 * then migrate the resulting mix of system and device-private pages
 * to the device again — both migrations must report all pages.
 */
890 * Migrate anonymous memory to device private memory and fault some of it back
891 * to system memory, then try migrating the resulting mix of system and device
892 * private memory to the device.
894 TEST_F(hmm, migrate_fault)
896 struct hmm_buffer *buffer;
897 unsigned long npages;
903 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
904 ASSERT_NE(npages, 0);
905 size = npages << self->page_shift;
907 buffer = malloc(sizeof(*buffer));
908 ASSERT_NE(buffer, NULL);
912 buffer->mirror = malloc(size);
913 ASSERT_NE(buffer->mirror, NULL);
915 buffer->ptr = mmap(NULL, size,
916 PROT_READ | PROT_WRITE,
917 MAP_PRIVATE | MAP_ANONYMOUS,
919 ASSERT_NE(buffer->ptr, MAP_FAILED);
921 /* Initialize buffer in system memory. */
922 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
925 /* Migrate memory to device. */
926 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
928 ASSERT_EQ(buffer->cpages, npages);
930 /* Check what the device read. */
931 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
932 ASSERT_EQ(ptr[i], i);
934 /* Fault half the pages back to system memory and check them. */
935 for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
936 ASSERT_EQ(ptr[i], i);
938 /* Migrate memory to the device again. */
939 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
941 ASSERT_EQ(buffer->cpages, npages);
943 /* Check what the device read. */
944 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
945 ASSERT_EQ(ptr[i], i);
947 hmm_buffer_free(buffer);
/*
 * MAP_SHARED anonymous memory cannot be migrated to device private
 * memory; the migrate ioctl must fail with -ENOENT.
 */
951 * Migrate anonymous shared memory to device private memory.
953 TEST_F(hmm, migrate_shared)
955 struct hmm_buffer *buffer;
956 unsigned long npages;
960 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
961 ASSERT_NE(npages, 0);
962 size = npages << self->page_shift;
964 buffer = malloc(sizeof(*buffer));
965 ASSERT_NE(buffer, NULL);
969 buffer->mirror = malloc(size);
970 ASSERT_NE(buffer->mirror, NULL);
972 buffer->ptr = mmap(NULL, size,
973 PROT_READ | PROT_WRITE,
974 MAP_SHARED | MAP_ANONYMOUS,
976 ASSERT_NE(buffer->ptr, MAP_FAILED);
978 /* Migrate memory to device. */
979 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
980 ASSERT_EQ(ret, -ENOENT);
982 hmm_buffer_free(buffer);
/*
 * Exercise migration across a patchwork of VMA states on two devices:
 * protected pages, an unmapped hole, a zero page, read-only pages and
 * read-write pages, plus an attempted cross-device migration which
 * must fail with -ENOENT.  NOTE(review): several lines are missing
 * from this extract (the saved base pointer 'p', some mprotect
 * PROT_NONE calls and value writes); the comments below reflect the
 * visible code only.
 */
988 TEST_F(hmm2, migrate_mixed)
990 struct hmm_buffer *buffer;
991 unsigned long npages;
999 size = npages << self->page_shift;
1001 buffer = malloc(sizeof(*buffer));
1002 ASSERT_NE(buffer, NULL);
1005 buffer->size = size;
1006 buffer->mirror = malloc(size);
1007 ASSERT_NE(buffer->mirror, NULL);
1009 /* Reserve a range of addresses. */
1010 buffer->ptr = mmap(NULL, size,
1012 MAP_PRIVATE | MAP_ANONYMOUS,
1014 ASSERT_NE(buffer->ptr, MAP_FAILED);
1017 /* Migrating a protected area should be an error. */
1018 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
1019 ASSERT_EQ(ret, -EINVAL);
1021 /* Punch a hole after the first page address. */
1022 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1025 /* We expect an error if the vma doesn't cover the range. */
1026 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
1027 ASSERT_EQ(ret, -EINVAL);
1029 /* Page 2 will be a read-only zero page. */
1030 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1033 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1037 /* Page 3 will be read-only. */
1038 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1039 PROT_READ | PROT_WRITE);
1041 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1043 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1047 /* Page 4-5 will be read-write. */
1048 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1049 PROT_READ | PROT_WRITE);
1051 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1053 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1056 /* Now try to migrate pages 2-5 to device 1. */
1057 buffer->ptr = p + 2 * self->page_size;
1058 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
1060 ASSERT_EQ(buffer->cpages, 4);
1062 /* Page 5 won't be migrated to device 0 because it's on device 1. */
1063 buffer->ptr = p + 5 * self->page_size;
1064 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1065 ASSERT_EQ(ret, -ENOENT);
1069 hmm_buffer_free(buffer);
/*
 * Repeat the migrate-then-fault-back cycle NTIMES with a fresh buffer
 * each iteration, to stress repeated device-private allocation and
 * teardown.
 */
1073 * Migrate anonymous memory to device private memory and fault it back to system
1074 * memory multiple times.
1076 TEST_F(hmm, migrate_multiple)
1078 struct hmm_buffer *buffer;
1079 unsigned long npages;
1086 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1087 ASSERT_NE(npages, 0);
1088 size = npages << self->page_shift;
1090 for (c = 0; c < NTIMES; c++) {
1091 buffer = malloc(sizeof(*buffer));
1092 ASSERT_NE(buffer, NULL);
1095 buffer->size = size;
1096 buffer->mirror = malloc(size);
1097 ASSERT_NE(buffer->mirror, NULL);
1099 buffer->ptr = mmap(NULL, size,
1100 PROT_READ | PROT_WRITE,
1101 MAP_PRIVATE | MAP_ANONYMOUS,
1103 ASSERT_NE(buffer->ptr, MAP_FAILED);
1105 /* Initialize buffer in system memory. */
1106 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1109 /* Migrate memory to device. */
1110 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
1113 ASSERT_EQ(buffer->cpages, npages);
1115 /* Check what the device read. */
1116 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1117 ASSERT_EQ(ptr[i], i);
1119 /* Fault pages back to system memory and check them. */
1120 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1121 ASSERT_EQ(ptr[i], i);
1123 hmm_buffer_free(buffer);
/*
 * Repeat the basic device-read test NTIMES, offsetting the pattern by
 * the iteration count (i + c) so each round verifies fresh data.
 */
1130 TEST_F(hmm, anon_read_multiple)
1132 struct hmm_buffer *buffer;
1133 unsigned long npages;
1140 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1141 ASSERT_NE(npages, 0);
1142 size = npages << self->page_shift;
1144 for (c = 0; c < NTIMES; c++) {
1145 buffer = malloc(sizeof(*buffer));
1146 ASSERT_NE(buffer, NULL);
1149 buffer->size = size;
1150 buffer->mirror = malloc(size);
1151 ASSERT_NE(buffer->mirror, NULL);
1153 buffer->ptr = mmap(NULL, size,
1154 PROT_READ | PROT_WRITE,
1155 MAP_PRIVATE | MAP_ANONYMOUS,
1157 ASSERT_NE(buffer->ptr, MAP_FAILED);
1159 /* Initialize buffer in system memory. */
1160 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1163 /* Simulate a device reading system memory. */
1164 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1167 ASSERT_EQ(buffer->cpages, npages);
1168 ASSERT_EQ(buffer->faults, 1);
1170 /* Check what the device read. */
1171 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1172 ASSERT_EQ(ptr[i], i + c);
1174 hmm_buffer_free(buffer);
/*
 * pthread entry point used by anon_teardown: after a random short
 * delay, unmap the second half of the buffer while the main thread is
 * issuing a device read, to race unmap against mirroring.
 */
1178 void *unmap_buffer(void *p)
1180 struct hmm_buffer *buffer = p;
1182 /* Delay for a bit and then unmap buffer while it is being read. */
1183 hmm_nanosleep(hmm_random() % 32000);
1184 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1191 * Try reading anonymous memory while it is being unmapped.
1193 TEST_F(hmm, anon_teardown)
1195 unsigned long npages;
1200 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1201 ASSERT_NE(npages, 0);
1202 size = npages << self->page_shift;
1204 for (c = 0; c < NTIMES; ++c) {
1206 struct hmm_buffer *buffer;
1211 buffer = malloc(sizeof(*buffer));
1212 ASSERT_NE(buffer, NULL);
1215 buffer->size = size;
1216 buffer->mirror = malloc(size);
1217 ASSERT_NE(buffer->mirror, NULL);
1219 buffer->ptr = mmap(NULL, size,
1220 PROT_READ | PROT_WRITE,
1221 MAP_PRIVATE | MAP_ANONYMOUS,
1223 ASSERT_NE(buffer->ptr, MAP_FAILED);
1225 /* Initialize buffer in system memory. */
1226 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1229 rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
1232 /* Simulate a device reading system memory. */
1233 rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1236 ASSERT_EQ(buffer->cpages, npages);
1237 ASSERT_EQ(buffer->faults, 1);
1239 /* Check what the device read. */
1240 for (i = 0, ptr = buffer->mirror;
1241 i < size / sizeof(*ptr);
1243 ASSERT_EQ(ptr[i], i + c);
1246 pthread_join(thread, &ret);
1247 hmm_buffer_free(buffer);
/*
 * Snapshot CPU page tables without faulting: build seven pages in
 * distinct states (error/hole, zero page, read-only, read-write,
 * device-0 private, device-1 private) and check the per-page
 * protection flags the snapshot reports.  Note the mirror only needs
 * one byte per page, hence malloc(npages).  NOTE(review): lines are
 * missing from this extract (saved base pointer 'p', PROT_NONE
 * mprotects, the 'm' flag-array setup).
 */
1254 TEST_F(hmm2, snapshot)
1256 struct hmm_buffer *buffer;
1257 unsigned long npages;
1266 size = npages << self->page_shift;
1268 buffer = malloc(sizeof(*buffer));
1269 ASSERT_NE(buffer, NULL);
1272 buffer->size = size;
1273 buffer->mirror = malloc(npages);
1274 ASSERT_NE(buffer->mirror, NULL);
1276 /* Reserve a range of addresses. */
1277 buffer->ptr = mmap(NULL, size,
1279 MAP_PRIVATE | MAP_ANONYMOUS,
1281 ASSERT_NE(buffer->ptr, MAP_FAILED);
1284 /* Punch a hole after the first page address. */
1285 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1288 /* Page 2 will be read-only zero page. */
1289 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1292 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1296 /* Page 3 will be read-only. */
1297 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1298 PROT_READ | PROT_WRITE);
1300 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1302 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1306 /* Page 4-6 will be read-write. */
1307 ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
1308 PROT_READ | PROT_WRITE);
1310 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1313 /* Page 5 will be migrated to device 0. */
1314 buffer->ptr = p + 5 * self->page_size;
1315 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1317 ASSERT_EQ(buffer->cpages, 1);
1319 /* Page 6 will be migrated to device 1. */
1320 buffer->ptr = p + 6 * self->page_size;
1321 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
1323 ASSERT_EQ(buffer->cpages, 1);
1325 /* Simulate a device snapshotting CPU pagetables. */
1327 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1329 ASSERT_EQ(buffer->cpages, npages);
1331 /* Check what the device saw. */
1333 ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
1334 ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
1335 ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
1336 ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
1337 ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
/* Page 5: private to the snapshotting device (fd0). */
1338 ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
1339 HMM_DMIRROR_PROT_WRITE);
/* Page 6: private to the other device, so fd0 sees no access. */
1340 ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
1342 hmm_buffer_free(buffer);
/*
 * Verify hmm_range_fault() reports the PMD (compound/large page) flag
 * when snapshotting a hugetlbfs region, first writable then after
 * mprotect(PROT_READ).  Requires libhugetlbfs; skips if no huge page
 * is available.
 */
1345 #ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
1347 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1348 * should be mapped by a large page table entry.
1350 TEST_F(hmm, compound)
1352 struct hmm_buffer *buffer;
1353 unsigned long npages;
1362 /* Skip test if we can't allocate a hugetlbfs page. */
1364 n = gethugepagesizes(pagesizes, 4);
/* Select the index of the smallest available huge page size. */
1367 for (idx = 0; --n > 0; ) {
1368 if (pagesizes[n] < pagesizes[idx])
1371 size = ALIGN(TWOMEG, pagesizes[idx]);
1372 npages = size >> self->page_shift;
1374 buffer = malloc(sizeof(*buffer));
1375 ASSERT_NE(buffer, NULL);
1377 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
1378 if (buffer->ptr == NULL) {
/* Snapshot needs only one flag byte per page. */
1383 buffer->size = size;
1384 buffer->mirror = malloc(npages);
1385 ASSERT_NE(buffer->mirror, NULL);
1387 /* Initialize the pages the device will snapshot in buffer->ptr. */
1388 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1391 /* Simulate a device snapshotting CPU pagetables. */
1392 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1394 ASSERT_EQ(buffer->cpages, npages);
1396 /* Check what the device saw. */
1398 for (i = 0; i < npages; ++i)
1399 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1400 HMM_DMIRROR_PROT_PMD);
1402 /* Make the region read-only. */
1403 ret = mprotect(buffer->ptr, size, PROT_READ);
1406 /* Simulate a device snapshotting CPU pagetables. */
1407 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1409 ASSERT_EQ(buffer->cpages, npages);
1411 /* Check what the device saw. */
1413 for (i = 0; i < npages; ++i)
1414 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1415 HMM_DMIRROR_PROT_PMD);
1417 free_hugepage_region(buffer->ptr);
1419 hmm_buffer_free(buffer);
1421 #endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
/*
 * Two devices read the same read-only range (double mapped): each
 * device must see the full pattern with one fault apiece, and the
 * range must still unmap cleanly afterwards.
 */
1424 * Test two devices reading the same memory (double mapped).
1426 TEST_F(hmm2, double_map)
1428 struct hmm_buffer *buffer;
1429 unsigned long npages;
1436 size = npages << self->page_shift;
1438 buffer = malloc(sizeof(*buffer));
1439 ASSERT_NE(buffer, NULL);
1442 buffer->size = size;
1443 buffer->mirror = malloc(npages);
1444 ASSERT_NE(buffer->mirror, NULL);
1446 /* Reserve a range of addresses. */
1447 buffer->ptr = mmap(NULL, size,
1448 PROT_READ | PROT_WRITE,
1449 MAP_PRIVATE | MAP_ANONYMOUS,
1451 ASSERT_NE(buffer->ptr, MAP_FAILED);
1453 /* Initialize buffer in system memory. */
1454 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1457 /* Make region read-only. */
1458 ret = mprotect(buffer->ptr, size, PROT_READ);
1461 /* Simulate device 0 reading system memory. */
1462 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1464 ASSERT_EQ(buffer->cpages, npages);
1465 ASSERT_EQ(buffer->faults, 1);
1467 /* Check what the device read. */
1468 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1469 ASSERT_EQ(ptr[i], i);
1471 /* Simulate device 1 reading system memory. */
1472 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1474 ASSERT_EQ(buffer->cpages, npages);
1475 ASSERT_EQ(buffer->faults, 1);
1477 /* Check what the device read. */
1478 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1479 ASSERT_EQ(ptr[i], i);
1481 /* Punch a hole after the first page address. */
1482 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1485 hmm_buffer_free(buffer);
/*
 * Basic atomic/exclusive access: mark the range for exclusive device
 * access, verify the device copy, then CPU-fault the pages back by
 * incrementing them and confirm the driver reports exclusive access
 * was revoked.
 */
1489 * Basic check of exclusive faulting.
1491 TEST_F(hmm, exclusive)
1493 struct hmm_buffer *buffer;
1494 unsigned long npages;
1500 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1501 ASSERT_NE(npages, 0);
1502 size = npages << self->page_shift;
1504 buffer = malloc(sizeof(*buffer));
1505 ASSERT_NE(buffer, NULL);
1508 buffer->size = size;
1509 buffer->mirror = malloc(size);
1510 ASSERT_NE(buffer->mirror, NULL);
1512 buffer->ptr = mmap(NULL, size,
1513 PROT_READ | PROT_WRITE,
1514 MAP_PRIVATE | MAP_ANONYMOUS,
1516 ASSERT_NE(buffer->ptr, MAP_FAILED);
1518 /* Initialize buffer in system memory. */
1519 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1522 /* Map memory exclusively for device access. */
1523 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1525 ASSERT_EQ(buffer->cpages, npages);
1527 /* Check what the device read. */
1528 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1529 ASSERT_EQ(ptr[i], i);
1531 /* Fault pages back to system memory and check them. */
/* The CPU write (ptr[i]++) forces the exclusive entry to be revoked. */
1532 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1533 ASSERT_EQ(ptr[i]++, i);
1535 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1536 ASSERT_EQ(ptr[i], i+1);
1538 /* Check atomic access revoked */
1539 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
1542 hmm_buffer_free(buffer);
/*
 * After mapping a range for exclusive device access, downgrade it to
 * PROT_READ with mprotect(); a subsequent device write must fail with
 * -EPERM (the exclusive entry does not bypass protections).
 */
1545 TEST_F(hmm, exclusive_mprotect)
1547 struct hmm_buffer *buffer;
1548 unsigned long npages;
1554 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1555 ASSERT_NE(npages, 0);
1556 size = npages << self->page_shift;
1558 buffer = malloc(sizeof(*buffer));
1559 ASSERT_NE(buffer, NULL);
1562 buffer->size = size;
1563 buffer->mirror = malloc(size);
1564 ASSERT_NE(buffer->mirror, NULL);
1566 buffer->ptr = mmap(NULL, size,
1567 PROT_READ | PROT_WRITE,
1568 MAP_PRIVATE | MAP_ANONYMOUS,
1570 ASSERT_NE(buffer->ptr, MAP_FAILED);
1572 /* Initialize buffer in system memory. */
1573 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1576 /* Map memory exclusively for device access. */
1577 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1579 ASSERT_EQ(buffer->cpages, npages);
1581 /* Check what the device read. */
1582 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1583 ASSERT_EQ(ptr[i], i);
1585 ret = mprotect(buffer->ptr, size, PROT_READ);
1588 /* Simulate a device writing system memory. */
1589 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
1590 ASSERT_EQ(ret, -EPERM);
1592 hmm_buffer_free(buffer);
/*
 * Exclusive access vs copy-on-write: mark the range exclusive, then
 * (after a fork, whose lines are missing from this extract) CPU-fault
 * the pages back by incrementing them and verify the values — CoW
 * must still work on exclusively-mapped pages.
 */
1596 * Check copy-on-write works.
1598 TEST_F(hmm, exclusive_cow)
1600 struct hmm_buffer *buffer;
1601 unsigned long npages;
1607 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1608 ASSERT_NE(npages, 0);
1609 size = npages << self->page_shift;
1611 buffer = malloc(sizeof(*buffer));
1612 ASSERT_NE(buffer, NULL);
1615 buffer->size = size;
1616 buffer->mirror = malloc(size);
1617 ASSERT_NE(buffer->mirror, NULL);
1619 buffer->ptr = mmap(NULL, size,
1620 PROT_READ | PROT_WRITE,
1621 MAP_PRIVATE | MAP_ANONYMOUS,
1623 ASSERT_NE(buffer->ptr, MAP_FAILED);
1625 /* Initialize buffer in system memory. */
1626 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1629 /* Map memory exclusively for device access. */
1630 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1632 ASSERT_EQ(buffer->cpages, npages);
1636 /* Fault pages back to system memory and check them. */
1637 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1638 ASSERT_EQ(ptr[i]++, i);
1640 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1641 ASSERT_EQ(ptr[i], i+1);
1643 hmm_buffer_free(buffer);