/*
 * Copyright © 2009-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *    Tvrtko Ursulin <tvrtko.ursulin@intel.com>
 *
 */
/** @file gem_userptr_blits.c
 *
 * This is a test of doing many blits using a mixture of normal system pages
 * and uncached linear buffers, with a working set larger than the
 * aperture size.
 *
 * The goal is to simply ensure the basics work.
 */
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <assert.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/mman.h>

#include "drm.h"
#include "i915_drm.h"

#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "ioctl_wrappers.h"

#include "eviction_common.c"
#define PAGE_SIZE 4096
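/* The userptr interface had not yet landed in the system i915_drm.h when
 * this test was written, so the ioctl number, flags and argument struct are
 * defined locally (the LOCAL_ prefix is the igt convention for that). The
 * struct mirrors the kernel's drm_i915_gem_userptr: user address and size of
 * the range to wrap in a bo, creation flags, and the returned handle.
 */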
#define LOCAL_I915_GEM_USERPTR 0x34
#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
struct local_i915_gem_userptr {
	uint64_t user_ptr;
	uint64_t user_size;
	uint32_t flags;
#define LOCAL_I915_USERPTR_READ_ONLY (1<<0)
#define LOCAL_I915_USERPTR_UNSYNCHRONIZED (1<<31)
	uint32_t handle;
};
static uint32_t userptr_flags = LOCAL_I915_USERPTR_UNSYNCHRONIZED;

/* 512x512 dwords = 1MiB per buffer, matching the "1MiB buffers" message
 * printed by test_coherency(). */
#define WIDTH 512
#define HEIGHT 512

static uint32_t linear[WIDTH*HEIGHT];
static void gem_userptr_test_unsynchronized(void)
{
	userptr_flags = LOCAL_I915_USERPTR_UNSYNCHRONIZED;
}

static void gem_userptr_test_synchronized(void)
{
	userptr_flags = 0;
}
static int gem_userptr(int fd, void *ptr, int size, int read_only, uint32_t *handle)
{
	struct local_i915_gem_userptr userptr;
	int ret;

	memset(&userptr, 0, sizeof(userptr));
	userptr.user_ptr = (uintptr_t)ptr;
	userptr.user_size = size;
	userptr.flags = userptr_flags;
	if (read_only)
		userptr.flags |= LOCAL_I915_USERPTR_READ_ONLY;

	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret)
		ret = errno;
	igt_skip_on_f(ret == ENODEV &&
		      (userptr_flags & LOCAL_I915_USERPTR_UNSYNCHRONIZED) == 0 &&
		      !read_only,
		      "Skipping, synchronized mappings with no kernel CONFIG_MMU_NOTIFIER?");

	if (ret == 0)
		*handle = userptr.handle;

	return ret;
}
static void gem_userptr_sync(int fd, uint32_t handle)
{
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
}
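/* Build and submit a single XY_SRC_COPY_BLT batch that blits WIDTH*HEIGHT
 * dwords from src to dst. On Gen8+ the blitter uses 64-bit relocations,
 * hence the extra dword reserved after each address and the larger command
 * length in the first dword. An 'error' of ~0 means "any failure will do",
 * otherwise the execbuffer ioctl is expected to fail with exactly that
 * errno (0 meaning it must succeed).
 */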
static void
copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret, i = 0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;

	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* hi address bits of dst reloc */
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* hi address bits of src reloc */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	obj[0].handle = dst;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = 0;
	obj[0].rsvd1 = obj[0].rsvd2 = 0;

	obj[1].handle = src;
	obj[1].relocation_count = 0;
	obj[1].relocs_ptr = 0;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = 0;
	obj[1].rsvd1 = obj[1].rsvd2 = 0;

	obj[2].handle = handle;
	obj[2].relocation_count = 2;
	obj[2].relocs_ptr = (uintptr_t)reloc;
	obj[2].alignment = 0;
	obj[2].offset = 0;
	obj[2].flags = 0;
	obj[2].rsvd1 = obj[2].rsvd2 = 0;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = 3;
	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	if (error == ~0)
		igt_assert(ret != 0);
	else
		igt_assert(ret == error);

	gem_close(fd, handle);
}
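/* Same blit as copy(), but every bo in 'all_bo' is added to the execbuffer
 * object list so the whole working set is referenced by one submission.
 * This is the copy hook plugged into the eviction helpers from
 * eviction_common.c; it asserts that execbuffer fails (or succeeds) with
 * 'error' and also returns that result to the caller.
 */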
static int
blit(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo, int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 *obj;
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int n, ret, i = 0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;

	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* hi address bits of dst reloc */
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* hi address bits of src reloc */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	obj = calloc(n_bo + 1, sizeof(*obj));
	for (n = 0; n < n_bo; n++)
		obj[n].handle = all_bo[n];
	obj[n].handle = handle;
	obj[n].relocation_count = 2;
	obj[n].relocs_ptr = (uintptr_t)reloc;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = n_bo + 1;
	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	igt_assert(ret == error);

	gem_close(fd, handle);
	free(obj);

	return ret;
}
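/* Wrap caller-provided memory in a userptr bo and fill it with the same
 * incrementing dword pattern that create_bo() below writes via gem_write(),
 * so CPU-backed and GPU-backed objects can be cross-checked.
 */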
static uint32_t
create_userptr(int fd, uint32_t val, uint32_t *ptr)
{
	uint32_t handle;
	int i, ret;

	ret = gem_userptr(fd, ptr, sizeof(linear), 0, &handle);
	igt_assert(ret == 0);
	igt_assert(handle != 0);

	/* Fill the BO with dwords starting at val */
	for (i = 0; i < WIDTH*HEIGHT; i++)
		ptr[i] = val++;

	return handle;
}
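/* Userptr objects are backed by memory the test allocates itself, so a
 * handle -> pointer map is kept to find (and later free) the backing store
 * when only the GEM handle is known. The map grows in chunks of 1000
 * entries to avoid a realloc on every object creation.
 */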
static void **handle_ptr_map;
static unsigned int num_handle_ptr_map;

static void add_handle_ptr(uint32_t handle, void *ptr)
{
	if (handle >= num_handle_ptr_map) {
		handle_ptr_map = realloc(handle_ptr_map,
					 (handle + 1000) * sizeof(void*));
		igt_assert(handle_ptr_map);
		num_handle_ptr_map = handle + 1000;
	}

	handle_ptr_map[handle] = ptr;
}

static void *get_handle_ptr(uint32_t handle)
{
	return handle_ptr_map[handle];
}

static void free_handle_ptr(uint32_t handle)
{
	igt_assert(handle < num_handle_ptr_map);
	igt_assert(handle_ptr_map[handle]);

	free(handle_ptr_map[handle]);
	handle_ptr_map[handle] = NULL;
}
static uint32_t create_userptr_bo(int fd, int size)
{
	void *ptr;
	uint32_t handle;
	int ret;

	ret = posix_memalign(&ptr, PAGE_SIZE, size);
	igt_assert(ret == 0);

	ret = gem_userptr(fd, (uint32_t *)ptr, size, 0, &handle);
	igt_assert(ret == 0);
	add_handle_ptr(handle, ptr);

	return handle;
}
static void clear(int fd, uint32_t handle, int size)
{
	void *ptr = get_handle_ptr(handle);

	igt_assert(ptr != NULL);

	memset(ptr, 0, size);
}
static void free_userptr_bo(int fd, uint32_t handle)
{
	gem_close(fd, handle);
	free_handle_ptr(handle);
}
static uint32_t
create_bo(int fd, uint32_t val)
{
	uint32_t handle;
	int i;

	handle = gem_create(fd, sizeof(linear));

	/* Fill the BO with dwords starting at val */
	for (i = 0; i < WIDTH*HEIGHT; i++)
		linear[i] = val++;
	gem_write(fd, handle, 0, linear, sizeof(linear));

	return handle;
}
static void
check_cpu(uint32_t *ptr, uint32_t val)
{
	int i;

	for (i = 0; i < WIDTH*HEIGHT; i++) {
		igt_assert_f(ptr[i] == val,
			     "Expected 0x%08x, found 0x%08x "
			     "at offset 0x%08x\n",
			     val, ptr[i], i * 4);
		val++;
	}
}

static void
check_gpu(int fd, uint32_t handle, uint32_t val)
{
	gem_read(fd, handle, 0, linear, sizeof(linear));
	check_cpu(linear, val);
}
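/* Probe for kernel userptr support by attempting one (unsynchronized)
 * object creation, restoring the global flags afterwards. Returns non-zero
 * if the ioctl is available.
 */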
static int has_userptr(int fd)
{
	uint32_t handle = 0;
	void *ptr;
	uint32_t oldflags;
	int ret;

	assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
	oldflags = userptr_flags;
	gem_userptr_test_unsynchronized();
	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
	userptr_flags = oldflags;
	if (ret != 0) {
		free(ptr);
		return 0;
	}

	gem_close(fd, handle);
	free(ptr);

	return handle != 0;
}
static int test_input_checking(int fd)
{
	struct local_i915_gem_userptr userptr;
	int ret;

	/* Invalid flags. */
	userptr.user_ptr = 0;
	userptr.user_size = 0;
	userptr.flags = ~0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	igt_assert(ret != 0);

	/* Too big. */
	userptr.user_ptr = 0;
	userptr.user_size = ~0;
	userptr.flags = 0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	igt_assert(ret != 0);

	/* Both wrong. */
	userptr.user_ptr = 0;
	userptr.user_size = ~0;
	userptr.flags = ~0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	igt_assert(ret != 0);

	return 0;
}
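/* Unsynchronized (mmu-notifier-less) userptr objects require CAP_SYS_ADMIN,
 * so once root privileges are dropped in a forked child the ioctl is
 * expected to fail with EPERM.
 */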
static int test_access_control(int fd)
{
	igt_fork(child, 1) {
		void *ptr;
		int ret;
		uint32_t handle;

		igt_drop_root();

		/* CAP_SYS_ADMIN is needed for UNSYNCHRONIZED mappings. */
		gem_userptr_test_unsynchronized();

		igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);

		ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
		if (ret == 0)
			gem_close(fd, handle);
		free(ptr);
		igt_assert(ret == EPERM);
	}

	igt_waitchildren();

	return 0;
}
static int test_invalid_mapping(int fd)
{
	int ret;
	uint32_t handle, handle2;
	void *ptr;

	/* NULL pointer. */
	ret = gem_userptr(fd, NULL, PAGE_SIZE, 0, &handle);
	igt_assert(ret == 0);
	copy(fd, handle, handle, ~0); /* QQQ Precise errno? */
	gem_close(fd, handle);

	/* GTT mapping */
	handle = create_bo(fd, 0);
	ptr = gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
	if (ptr == NULL)
		gem_close(fd, handle);
	igt_assert(ptr != NULL);
	assert(((unsigned long)ptr & (PAGE_SIZE - 1)) == 0);
	assert((sizeof(linear) & (PAGE_SIZE - 1)) == 0);
	ret = gem_userptr(fd, ptr, sizeof(linear), 0, &handle2);
	igt_assert(ret == 0);
	copy(fd, handle2, handle2, ~0); /* QQQ Precise errno? */
	gem_close(fd, handle2);
	munmap(ptr, sizeof(linear));
	gem_close(fd, handle);

	return 0;
}
static int test_forbidden_ops(int fd)
{
	void *ptr;
	int ret;
	uint32_t handle;
	char buf[PAGE_SIZE];
	struct drm_i915_gem_pread gem_pread;
	struct drm_i915_gem_pwrite gem_pwrite;

	assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);

	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
	igt_assert(ret == 0);

	gem_pread.handle = handle;
	gem_pread.offset = 0;
	gem_pread.size = PAGE_SIZE;
	gem_pread.data_ptr = (uintptr_t)buf;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread);
	if (ret == 0) {
		gem_close(fd, handle);
		free(ptr);
	}
	igt_assert(ret != 0);

	gem_pwrite.handle = handle;
	gem_pwrite.offset = 0;
	gem_pwrite.size = PAGE_SIZE;
	gem_pwrite.data_ptr = (uintptr_t)buf;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
	if (ret == 0) {
		gem_close(fd, handle);
		free(ptr);
	}
	igt_assert(ret != 0);

	gem_close(fd, handle);
	free(ptr);

	return 0;
}
static void (*orig_sigbus)(int sig, siginfo_t *info, void *param);
static unsigned long sigbus_start;
static long sigbus_cnt = -1;
static uint32_t counter;
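/* Support code for the dma-buf test: 'counter' is the fill byte written on
 * each check_bo() pass, and the sigbus_* bookkeeping lets test_dmabuf()
 * verify that touching an imported mapping whose userptr backing store has
 * been freed raises SIGBUS. The handler below plugs the faulting range with
 * anonymous memory so the test can continue past the fault.
 */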
static void
check_bo(int fd1, uint32_t handle1, int is_userptr, int fd2, uint32_t handle2)
{
	unsigned char *ptr1, *ptr2;
	unsigned long size = sizeof(linear);
	unsigned long i;

	if (is_userptr)
		ptr1 = get_handle_ptr(handle1);
	else
		ptr1 = gem_mmap(fd1, handle1, sizeof(linear), PROT_READ | PROT_WRITE);

	ptr2 = gem_mmap(fd2, handle2, sizeof(linear), PROT_READ | PROT_WRITE);

	igt_assert(ptr1);
	igt_assert(ptr2);

	sigbus_start = (unsigned long)ptr2;

	if (sigbus_cnt == 0) {
		/* check whether it's still our old object first. */
		for (i = 0; i < size; i++) {
			igt_assert(ptr1[i] == counter);
			igt_assert(ptr2[i] == counter);
		}
	}

	counter++;
	memset(ptr1, counter, size);
	igt_assert(memcmp(ptr1, ptr2, size) == 0);

	if (!is_userptr)
		munmap(ptr1, sizeof(linear));
	munmap(ptr2, sizeof(linear));
}
static int export_handle(int fd, uint32_t handle, int *outfd)
{
	struct drm_prime_handle args;
	int ret;

	args.handle = handle;
	args.flags = DRM_CLOEXEC;
	args.fd = -1;

	ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
	if (ret)
		ret = errno;
	*outfd = args.fd;

	return ret;
}
static void sigbus(int sig, siginfo_t *info, void *param)
{
	unsigned long ptr = (unsigned long)info->si_addr;
	void *addr;

	if (ptr >= sigbus_start &&
	    ptr <= (sigbus_start + sizeof(linear))) {
		sigbus_cnt++;
		addr = mmap((void *)ptr, sizeof(linear), PROT_READ | PROT_WRITE,
			    MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
		if ((unsigned long)addr == ptr) {
			memset(addr, counter, sizeof(linear));
			return;
		}
	}

	if (orig_sigbus)
		orig_sigbus(sig, info, param);
	igt_assert(0);
}
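/* Export a userptr bo as a dma-buf and re-import it on a second fd.
 * Unsynchronized objects must refuse to export (EINVAL); synchronized ones
 * must share pages coherently, survive the dma-buf fd being closed, hand
 * back the same handle on re-import, and SIGBUS on access once the original
 * object and its backing store are gone.
 */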
static int test_dmabuf(void)
{
	int fd1, fd2;
	uint32_t handle, handle_import1, handle_import2, handle_selfimport;
	int dma_buf_fd = -1;
	int ret;
	struct sigaction sigact, orig_sigact;

	fd1 = drm_open_any();
	fd2 = drm_open_any();

	handle = create_userptr_bo(fd1, sizeof(linear));

	ret = export_handle(fd1, handle, &dma_buf_fd);
	if (userptr_flags & LOCAL_I915_USERPTR_UNSYNCHRONIZED) {
		igt_assert(ret == EINVAL);
		free_userptr_bo(fd1, handle);
		return 0;
	} else {
		igt_assert(ret == 0);
		igt_assert(dma_buf_fd >= 0);
	}

	handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
	check_bo(fd1, handle, 1, fd2, handle_import1);

	/* reimport should give us the same handle so that userspace can check
	 * whether it has that bo already somewhere. */
	handle_import2 = prime_fd_to_handle(fd2, dma_buf_fd);
	igt_assert(handle_import1 == handle_import2);

	/* Same for re-importing on the exporting fd. */
	handle_selfimport = prime_fd_to_handle(fd1, dma_buf_fd);
	igt_assert(handle == handle_selfimport);

	/* close dma_buf, check whether nothing disappears. */
	close(dma_buf_fd);
	check_bo(fd1, handle, 1, fd2, handle_import1);

	/* destroy userptr object and expect SIGBUS */
	free_userptr_bo(fd1, handle);
	sigact.sa_sigaction = sigbus;
	sigact.sa_flags = SA_SIGINFO;
	sigemptyset(&sigact.sa_mask);
	ret = sigaction(SIGBUS, &sigact, &orig_sigact);
	igt_assert(ret == 0);
	orig_sigbus = orig_sigact.sa_sigaction;
	sigbus_cnt = 0;
	check_bo(fd2, handle_import1, 0, fd2, handle_import1);
	assert(sigbus_cnt > 0);
	sigact.sa_sigaction = orig_sigbus;
	sigact.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGBUS, &sigact, &orig_sigact);
	igt_assert(ret == 0);

	gem_close(fd2, handle_import1);
	close(fd1);
	close(fd2);

	return 0;
}
static int test_usage_restrictions(int fd)
{
	void *ptr;
	int ret;
	uint32_t handle;

	assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE * 2) == 0);

	/* Address not aligned. */
	ret = gem_userptr(fd, (char *)ptr + 1, PAGE_SIZE, 0, &handle);
	igt_assert(ret != 0);

	/* Size not rounded to page size. */
	ret = gem_userptr(fd, ptr, PAGE_SIZE - 1, 0, &handle);
	igt_assert(ret != 0);

	/* Both wrong. */
	ret = gem_userptr(fd, (char *)ptr + 1, PAGE_SIZE - 1, 0, &handle);
	igt_assert(ret != 0);

	/* Read-only not supported. */
	ret = gem_userptr(fd, (char *)ptr, PAGE_SIZE, 1, &handle);
	igt_assert(ret != 0);

	free(ptr);

	return 0;
}
static int test_create_destroy(int fd)
{
	void *ptr;
	int ret;
	uint32_t handle;

	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);

	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
	igt_assert(ret == 0);

	gem_close(fd, handle);
	free(ptr);

	return 0;
}
static int test_coherency(int fd, int count)
{
	uint32_t *memory;
	uint32_t *cpu, *cpu_val;
	uint32_t *gpu, *gpu_val;
	uint32_t start = 0;
	int i, ret;

	igt_info("Using 2x%d 1MiB buffers\n", count);

	ret = posix_memalign((void **)&memory, PAGE_SIZE, count*sizeof(linear));
	igt_assert(ret == 0 && memory);

	gpu = malloc(sizeof(uint32_t)*count*4);
	gpu_val = gpu + count;
	cpu = gpu_val + count;
	cpu_val = cpu + count;

	for (i = 0; i < count; i++) {
		gpu[i] = create_bo(fd, start);
		gpu_val[i] = start;
		start += WIDTH*HEIGHT;
	}

	for (i = 0; i < count; i++) {
		cpu[i] = create_userptr(fd, start, memory+i*WIDTH*HEIGHT);
		cpu_val[i] = start;
		start += WIDTH*HEIGHT;
	}

	igt_info("Verifying initialisation...\n");
	for (i = 0; i < count; i++) {
		check_gpu(fd, gpu[i], gpu_val[i]);
		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
	}

	igt_info("Cyclic blits cpu->gpu, forward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = i % count;
		int dst = (i + 1) % count;

		copy(fd, gpu[dst], cpu[src], 0);
		gpu_val[dst] = cpu_val[src];
	}
	for (i = 0; i < count; i++)
		check_gpu(fd, gpu[i], gpu_val[i]);

	igt_info("Cyclic blits gpu->cpu, backward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = (i + 1) % count;
		int dst = i % count;

		copy(fd, cpu[dst], gpu[src], 0);
		cpu_val[dst] = gpu_val[src];
	}
	for (i = 0; i < count; i++) {
		gem_userptr_sync(fd, cpu[i]);
		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
	}

	igt_info("Random blits...\n");
	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		if (random() & 1) {
			copy(fd, gpu[dst], cpu[src], 0);
			gpu_val[dst] = cpu_val[src];
		} else {
			copy(fd, cpu[dst], gpu[src], 0);
			cpu_val[dst] = gpu_val[src];
		}
	}
	for (i = 0; i < count; i++) {
		check_gpu(fd, gpu[i], gpu_val[i]);
		gem_close(fd, gpu[i]);

		gem_userptr_sync(fd, cpu[i]);
		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
		gem_close(fd, cpu[i]);
	}

	free(gpu);
	free(memory);

	return 0;
}
static struct igt_eviction_test_ops fault_ops = {
	.create = create_userptr_bo,
	.close = free_userptr_bo,
	.copy = blit,
	.clear = clear,
};
static int can_swap(void)
{
	unsigned long as, ram;

	/* Cannot swap if not enough address space */

	/* FIXME: Improve check criteria. */
	if (sizeof(void*) < 8)
		as = 3 * 1024; /* assume ~3GiB usable for 32-bit userspace */
	else
		as = 256 * 1024; /* Just a big number */

	ram = intel_get_total_ram_mb();

	if ((as - 128) < (ram - 256))
		return 0;

	return 1;
}
#define min(a, b) ((a) < (b) ? (a) : (b))

static void test_forking_evictions(int fd, int size, int count,
				   unsigned flags)
{
	int trash_count;
	int num_threads;

	trash_count = intel_get_total_ram_mb() * 11 / 10;
	/* Use the fact test will spawn a number of child
	 * processes meaning swapping will be triggered system
	 * wide even if one process on its own can't do it.
	 */
	num_threads = min(sysconf(_SC_NPROCESSORS_ONLN) * 4, 12);
	trash_count /= num_threads;
	if (count > trash_count)
		count = trash_count;

	forking_evictions(fd, &fault_ops, size, count, trash_count, flags);
}
static void test_swapping_evictions(int fd, int size, int count)
{
	int trash_count;

	igt_skip_on_f(!can_swap(),
		      "Not enough process address space for swapping tests.\n");

	trash_count = intel_get_total_ram_mb() * 11 / 10;

	swapping_evictions(fd, &fault_ops, size, count, trash_count);
}

static void test_minor_evictions(int fd, int size, int count)
{
	minor_evictions(fd, &fault_ops, size, count);
}

static void test_major_evictions(int fd, int size, int count)
{
	major_evictions(fd, &fault_ops, size, count);
}
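/* Overlapping userptr ranges: objects on distinct pages must always be
 * created successfully, while fully or partially overlapping ranges are
 * expected to fail with 'expected' (0 for unsynchronized mappings, EINVAL
 * for synchronized ones, matching the subtest invocations in main()).
 */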
static int test_overlap(int fd, int expected)
{
	char *ptr;
	int ret;
	uint32_t handle, handle2;

	igt_assert(posix_memalign((void *)&ptr, PAGE_SIZE, PAGE_SIZE * 3) == 0);

	ret = gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE, 0, &handle);
	igt_assert(ret == 0);

	/* before, no overlap */
	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle2);
	igt_assert(ret == 0);
	gem_close(fd, handle2);

	/* after, no overlap */
	ret = gem_userptr(fd, ptr + PAGE_SIZE * 2, PAGE_SIZE, 0, &handle2);
	igt_assert(ret == 0);
	gem_close(fd, handle2);

	/* start overlaps */
	ret = gem_userptr(fd, ptr, PAGE_SIZE * 2, 0, &handle2);
	igt_assert(ret == expected);
	if (ret == 0)
		gem_close(fd, handle2);

	/* end overlaps */
	ret = gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE * 2, 0, &handle2);
	igt_assert(ret == expected);
	if (ret == 0)
		gem_close(fd, handle2);

	/* subsumes */
	ret = gem_userptr(fd, ptr, PAGE_SIZE * 3, 0, &handle2);
	igt_assert(ret == expected);
	if (ret == 0)
		gem_close(fd, handle2);

	gem_close(fd, handle);
	free(ptr);

	return 0;
}
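/* Drop the backing store with munmap() while the GEM handles still exist:
 * subsequent blits from the affected objects are expected to fail with
 * 'expected' (EFAULT for synchronized mappings; 0 for unsynchronized ones,
 * which have no way of noticing the unmap).
 */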
static int test_unmap(int fd, int expected)
{
	char *ptr, *bo_ptr;
	const unsigned int num_obj = 3;
	unsigned int i;
	uint32_t bo[num_obj + 1];
	size_t map_size = sizeof(linear) * num_obj + (PAGE_SIZE - 1);
	int ret;

	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	assert(ptr != MAP_FAILED);

	bo_ptr = (char *)ALIGN((unsigned long)ptr, PAGE_SIZE);

	for (i = 0; i < num_obj; i++, bo_ptr += sizeof(linear)) {
		ret = gem_userptr(fd, bo_ptr, sizeof(linear), 0, &bo[i]);
		igt_assert(ret == 0);
	}

	bo[num_obj] = create_bo(fd, 0);

	for (i = 0; i < num_obj; i++)
		copy(fd, bo[num_obj], bo[i], 0);

	ret = munmap(ptr, map_size);
	igt_assert(ret == 0);

	for (i = 0; i < num_obj; i++)
		copy(fd, bo[num_obj], bo[i], expected);

	for (i = 0; i < (num_obj + 1); i++)
		gem_close(fd, bo[i]);

	return 0;
}
static int test_unmap_after_close(int fd)
{
	char *ptr, *bo_ptr;
	const unsigned int num_obj = 3;
	unsigned int i;
	uint32_t bo[num_obj + 1];
	size_t map_size = sizeof(linear) * num_obj + (PAGE_SIZE - 1);
	int ret;

	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	assert(ptr != MAP_FAILED);

	bo_ptr = (char *)ALIGN((unsigned long)ptr, PAGE_SIZE);

	for (i = 0; i < num_obj; i++, bo_ptr += sizeof(linear)) {
		ret = gem_userptr(fd, bo_ptr, sizeof(linear), 0, &bo[i]);
		igt_assert(ret == 0);
	}

	bo[num_obj] = create_bo(fd, 0);

	for (i = 0; i < num_obj; i++)
		copy(fd, bo[num_obj], bo[i], 0);

	for (i = 0; i < (num_obj + 1); i++)
		gem_close(fd, bo[i]);

	ret = munmap(ptr, map_size);
	igt_assert(ret == 0);

	return 0;
}
static int test_unmap_cycles(int fd, int expected)
{
	int i;

	for (i = 0; i < 1000; i++)
		test_unmap(fd, expected);

	return 0;
}
unsigned int total_ram;
uint64_t aperture_size;
int fd, count;
int main(int argc, char **argv)
{
	int size = sizeof(linear);
	int ret;

	igt_skip_on_simulation();

	igt_subtest_init(argc, argv);

	fd = drm_open_any();
	igt_assert(fd >= 0);

	ret = has_userptr(fd);
	igt_skip_on_f(ret == 0, "No userptr support - %s (%d)\n",
		      strerror(errno), ret);

	size = sizeof(linear);

	aperture_size = gem_aperture_size(fd);
	igt_info("Aperture size is %lu MiB\n",
		 (unsigned long)(aperture_size / (1024*1024)));

	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 2 * aperture_size / (1024*1024) / 3;

	total_ram = intel_get_total_ram_mb();
	igt_info("Total RAM is %u MiB\n", total_ram);

	if (count > total_ram * 3 / 4) {
		count = intel_get_total_ram_mb() * 3 / 4;
		igt_info("Not enough RAM to run test, reducing buffer count.\n");
	}

	igt_subtest("input-checking")
		test_input_checking(fd);

	igt_subtest("usage-restrictions")
		test_usage_restrictions(fd);

	igt_subtest("invalid-mapping")
		test_invalid_mapping(fd);

	igt_subtest("forbidden-operations")
		test_forbidden_ops(fd);

	igt_info("Testing unsynchronized mappings...\n");
	gem_userptr_test_unsynchronized();

	igt_subtest("create-destroy-unsync")
		test_create_destroy(fd);

	igt_subtest("unsync-overlap")
		test_overlap(fd, 0);

	igt_subtest("unsync-unmap")
		test_unmap(fd, 0);

	igt_subtest("unsync-unmap-cycles")
		test_unmap_cycles(fd, 0);

	igt_subtest("unsync-unmap-after-close")
		test_unmap_after_close(fd);

	igt_subtest("coherency-unsync")
		test_coherency(fd, count);

	igt_subtest("dmabuf-unsync")
		test_dmabuf();

	for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
		igt_subtest_f("forked-unsync%s%s%s-%s",
			      flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
			      flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
			      flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
				    "-mempressure" : "",
			      flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
				    "interruptible" : "normal") {
			test_forking_evictions(fd, size, count, flags);
		}
	}

	igt_subtest("swapping-unsync-normal")
		test_swapping_evictions(fd, size, count);

	igt_subtest("minor-unsync-normal")
		test_minor_evictions(fd, size, count);

	igt_subtest("major-unsync-normal") {
		size = 200 * 1024 * 1024;
		count = (gem_aperture_size(fd) / size) + 2;
		test_major_evictions(fd, size, count);
	}

	size = sizeof(linear);
	count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
	if (count > total_ram * 3 / 4)
		count = intel_get_total_ram_mb() * 3 / 4;

	igt_fork_signal_helper();

	igt_subtest("swapping-unsync-interruptible")
		test_swapping_evictions(fd, size, count);

	igt_subtest("minor-unsync-interruptible")
		test_minor_evictions(fd, size, count);

	igt_subtest("major-unsync-interruptible") {
		size = 200 * 1024 * 1024;
		count = (gem_aperture_size(fd) / size) + 2;
		test_major_evictions(fd, size, count);
	}

	igt_stop_signal_helper();

	igt_info("Testing synchronized mappings...\n");

	size = sizeof(linear);
	count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
	if (count > total_ram * 3 / 4)
		count = intel_get_total_ram_mb() * 3 / 4;

	gem_userptr_test_synchronized();

	igt_subtest("create-destroy-sync")
		test_create_destroy(fd);

	igt_subtest("sync-overlap")
		test_overlap(fd, EINVAL);

	igt_subtest("sync-unmap")
		test_unmap(fd, EFAULT);

	igt_subtest("sync-unmap-cycles")
		test_unmap_cycles(fd, EFAULT);

	igt_subtest("sync-unmap-after-close")
		test_unmap_after_close(fd);

	igt_subtest("coherency-sync")
		test_coherency(fd, count);

	igt_subtest("dmabuf-sync")
		test_dmabuf();

	for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
		igt_subtest_f("forked-sync%s%s%s-%s",
			      flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
			      flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
			      flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
				    "-mempressure" : "",
			      flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
				    "interruptible" : "normal") {
			test_forking_evictions(fd, size, count, flags);
		}
	}

	igt_subtest("swapping-normal-sync")
		test_swapping_evictions(fd, size, count);

	igt_subtest("minor-normal-sync")
		test_minor_evictions(fd, size, count);

	igt_subtest("major-normal-sync") {
		size = 200 * 1024 * 1024;
		count = (gem_aperture_size(fd) / size) + 2;
		test_major_evictions(fd, size, count);
	}

	size = sizeof(linear);
	count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
	if (count > total_ram * 3 / 4)
		count = intel_get_total_ram_mb() * 3 / 4;

	igt_fork_signal_helper();

	igt_subtest("swapping-sync-interruptible")
		test_swapping_evictions(fd, size, count);

	igt_subtest("minor-sync-interruptible")
		test_minor_evictions(fd, size, count);

	igt_subtest("major-sync-interruptible") {
		size = 200 * 1024 * 1024;
		count = (gem_aperture_size(fd) / size) + 2;
		test_major_evictions(fd, size, count);
	}

	igt_stop_signal_helper();

	igt_subtest("access-control")
		test_access_control(fd);

	igt_exit();
}