2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Chris Wilson <chris@chris-wilson.co.uk>
38 #include <sys/ioctl.h>
40 #include "ioctl_wrappers.h"
42 #include "igt_debugfs.h"
/* Size of every buffer object these tests create.  Deliberately non-const:
 * igt_main shrinks it to 1 MiB when running in simulation. */
44 static int OBJECT_SIZE = 16*1024*1024;
/* Set both the read and the write domain of @handle to the GTT, so that
 * subsequent CPU access through a GTT mapping is coherent. */
46 static void set_domain(int fd, uint32_t handle)
48 	gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
/* Map the entire OBJECT_SIZE bytes of @handle read/write via gem_mmap()
 * and assert the mapping succeeded; the CPU pointer is returned to the
 * caller (return statement elided in this view). */
52 mmap_bo(int fd, uint32_t handle)
56 	ptr = gem_mmap(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
57 	igt_assert(ptr != MAP_FAILED);
/* Create a fresh OBJECT_SIZE object, map it, then close the handle: the
 * mapping outlives the handle (callers memcpy through it afterwards), so
 * only the pointer is handed back and the caller munmaps it. */
63 create_pointer(int fd)
68 	handle = gem_create(fd, OBJECT_SIZE);
70 	ptr = mmap_bo(fd, handle);
72 	gem_close(fd, handle);
/* "access" subtest body (signature and fd2 setup elided in this view):
 * verify that the fake offset returned by DRM_IOCTL_I915_GEM_MMAP_GTT is
 * private to the fd that obtained it — mmap64 on a second fd must fail
 * with EACCES — until the object is shared via flink/open, after which
 * the second fd may map the same offset. */
80 	uint32_t handle, flink, handle2;
81 	struct drm_i915_gem_mmap_gtt mmap_arg;
84 	handle = gem_create(fd, OBJECT_SIZE);
89 	/* Check that fd1 can mmap. */
90 	mmap_arg.handle = handle;
91 	igt_assert(drmIoctl(fd,
92 	DRM_IOCTL_I915_GEM_MMAP_GTT,
95 	igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,
96 	MAP_SHARED, fd, mmap_arg.offset));
98 	/* Check that the same offset on the other fd doesn't work. */
99 	igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,
100 	MAP_SHARED, fd2, mmap_arg.offset) == MAP_FAILED);
101 	igt_assert(errno == EACCES);
103 	flink = gem_flink(fd, handle);
105 	handle2 = gem_open(fd2, flink);
108 	/* Recheck that it works after flink. */
109 	/* Now that fd2 holds a handle to the object, mapping the same
110 	 * offset through fd2 must succeed. */
110 	igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,
111 	MAP_SHARED, fd2, mmap_arg.offset));
/* "short" subtest body (signature elided in this view): map sub-ranges of
 * the object at its fake offset, doubling the length from one page up to
 * the whole object, once PROT_READ|PROT_WRITE and once PROT_READ, and
 * touch the first and last byte of every mapped page so that partial
 * mappings of the object fault correctly. */
117 	struct drm_i915_gem_mmap_gtt mmap_arg;
120 	mmap_arg.handle = gem_create(fd, OBJECT_SIZE);
121 	igt_assert(mmap_arg.handle);
123 	igt_assert(drmIoctl(fd,
124 	DRM_IOCTL_I915_GEM_MMAP_GTT,
126 	for (pages = 1; pages <= OBJECT_SIZE / 4096; pages <<= 1) {
129 	w = mmap64(0, pages * 4096, PROT_READ | PROT_WRITE,
130 	MAP_SHARED, fd, mmap_arg.offset);
131 	igt_assert(w != MAP_FAILED);
133 	r = mmap64(0, pages * 4096, PROT_READ,
134 	MAP_SHARED, fd, mmap_arg.offset);
135 	igt_assert(r != MAP_FAILED);
	/* Touch both edges of every page through both mappings. */
137 	for (p = 0; p < pages; p++) {
138 	w[4096*p] = r[4096*p];
139 	w[4096*p+4095] = r[4096*p+4095];
142 	munmap(r, pages * 4096);
143 	munmap(w, pages * 4096);
145 	gem_close(fd, mmap_arg.handle);
/* "copy" subtest body (signature elided in this view): memcpy between two
 * freshly created mappings in both directions, so both mappings take page
 * faults on their first access. */
153 	/* copy from a fresh src to fresh dst to force pagefault on both */
154 	src = create_pointer(fd);
155 	dst = create_pointer(fd);
157 	memcpy(dst, src, OBJECT_SIZE);
158 	memcpy(src, dst, OBJECT_SIZE);
160 	munmap(dst, OBJECT_SIZE);
161 	munmap(src, OBJECT_SIZE);
/* Ordering of the CPU read vs. write performed through the mapping under
 * test (enumerators READ_BEFORE_WRITE / READ_AFTER_WRITE, elided here). */
164 enum test_read_write {
/* Through a single mapping, perform a read then a write of the first
 * dword — or the reverse, per @order — to exercise fault handling for
 * either first-access direction.  @val is volatile so neither access is
 * optimised away. */
170 test_read_write(int fd, enum test_read_write order)
174 	volatile uint32_t val = 0;
176 	handle = gem_create(fd, OBJECT_SIZE);
178 	ptr = gem_mmap(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
179 	igt_assert(ptr != MAP_FAILED);
181 	if (order == READ_BEFORE_WRITE) {
182 	val = *(uint32_t *)ptr;
183 	*(uint32_t *)ptr = val;
	/* (else branch of the order check; "} else {" elided in this view) */
185 	*(uint32_t *)ptr = val;
186 	val = *(uint32_t *)ptr;
189 	gem_close(fd, handle);
190 	munmap(ptr, OBJECT_SIZE);
/* As test_read_write(), but using two distinct mappings of the same
 * object — one PROT_READ, one PROT_READ|PROT_WRITE — so the read and the
 * write fault through separate VMAs in the order given by @order. */
194 test_read_write2(int fd, enum test_read_write order)
198 	volatile uint32_t val = 0;
200 	handle = gem_create(fd, OBJECT_SIZE);
202 	r = gem_mmap(fd, handle, OBJECT_SIZE, PROT_READ);
203 	igt_assert(r != MAP_FAILED);
205 	w = gem_mmap(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
206 	igt_assert(w != MAP_FAILED);
208 	if (order == READ_BEFORE_WRITE) {
209 	val = *(uint32_t *)r;
210 	*(uint32_t *)w = val;
	/* (else branch of the order check; "} else {" elided in this view) */
212 	*(uint32_t *)w = val;
213 	val = *(uint32_t *)r;
216 	gem_close(fd, handle);
217 	munmap(r, OBJECT_SIZE);
218 	munmap(w, OBJECT_SIZE);
/* "write" subtest body (signature elided in this view): pwrite from a
 * freshly faulted mapping (src) into a fresh, never-touched object (dst),
 * exercising the read side of the mapping via gem_write. */
227 	/* copy from a fresh src to fresh dst to force pagefault on both */
228 	src = create_pointer(fd);
229 	dst = gem_create(fd, OBJECT_SIZE);
231 	gem_write(fd, dst, 0, src, OBJECT_SIZE);
234 	munmap(src, OBJECT_SIZE);
/* Like test_write(), except the destination object is first faulted into
 * the GTT (mapped and memset to zero) before the pwrite, so the write
 * path hits an object that already has a GTT presence. */
238 test_write_gtt(int fd)
244 	dst = gem_create(fd, OBJECT_SIZE);
246 	/* prefault object into gtt */
247 	dst_gtt = mmap_bo(fd, dst);
249 	memset(dst_gtt, 0, OBJECT_SIZE);
250 	munmap(dst_gtt, OBJECT_SIZE);
252 	src = create_pointer(fd);
254 	gem_write(fd, dst, 0, src, OBJECT_SIZE);
257 	munmap(src, OBJECT_SIZE);
/* "read" subtest body (signature elided in this view): pread from a
 * fresh, never-touched object (src) into a freshly faulted mapping (dst),
 * exercising the write side of the mapping via gem_read. */
266 	/* copy from a fresh src to fresh dst to force pagefault on both */
267 	dst = create_pointer(fd);
268 	src = gem_create(fd, OBJECT_SIZE);
270 	gem_read(fd, src, 0, dst, OBJECT_SIZE);
273 	munmap(dst, OBJECT_SIZE);
/* Per-thread state for test_fault_concurrent(): thread handle, a small
 * integer id, and the shared array of mapped pointers (members elided). */
276 struct thread_fault_concurrent {
/* Worker routine: each thread walks all 32 shared mappings, starting at
 * an offset derived from its id so the threads collide on different
 * objects, writing and reading a dword through each mapping to provoke
 * concurrent page faults.  (The write-vs-read selection logic between
 * the two accesses is elided in this view.) */
283 thread_fault_concurrent(void *closure)
285 	struct thread_fault_concurrent *t = closure;
289 	for (n = 0; n < 32; n++) {
291 	*t->ptr[(n + t->id) % 32] = val;
293 	val = *t->ptr[(n + t->id) % 32];
/* Create 32 fresh mappings, then hammer them from 64 threads running
 * thread_fault_concurrent() — twice as many threads as objects, so every
 * mapping is faulted by multiple threads at once — and finally join the
 * threads and tear the mappings down. */
300 test_fault_concurrent(int fd)
303 	struct thread_fault_concurrent thread[64];
306 	for (n = 0; n < 32; n++) {
307 	ptr[n] = create_pointer(fd);
310 	for (n = 0; n < 64; n++) {
313 	pthread_create(&thread[n].thread, NULL, thread_fault_concurrent, &thread[n]);
316 	for (n = 0; n < 64; n++)
317 	pthread_join(thread[n].thread, NULL);
319 	for (n = 0; n < 32; n++) {
320 	munmap(ptr[n], OBJECT_SIZE);
/* Run @func with the kernel's fault-ahead (prefault) disabled through
 * debugfs, re-enabling it afterwards, so every access takes a real fault. */
325 run_without_prefault(int fd,
326 	void (*func)(int fd))
328 	igt_disable_prefault();
330 	igt_enable_prefault();
	/* Subtest enumeration (fixture lines elided in this view).  Objects
	 * shrink from 16 MiB to 1 MiB under simulation to keep runtime sane.
	 * The "access" and "write-gtt" subtest bodies are elided here. */
337 	if (igt_run_in_simulation())
338 	OBJECT_SIZE = 1 * 1024 * 1024;
343 	igt_subtest("access")
353 	igt_subtest("write-gtt")
355 	igt_subtest("read-write")
356 	test_read_write(fd, READ_BEFORE_WRITE);
357 	igt_subtest("write-read")
358 	test_read_write(fd, READ_AFTER_WRITE);
359 	igt_subtest("read-write-distinct")
360 	test_read_write2(fd, READ_BEFORE_WRITE);
361 	igt_subtest("write-read-distinct")
362 	test_read_write2(fd, READ_AFTER_WRITE);
363 	igt_subtest("fault-concurrent")
364 	test_fault_concurrent(fd);
	/* no-prefault variants: every page access must fault individually */
365 	igt_subtest("read-no-prefault")
366 	run_without_prefault(fd, test_read);
367 	igt_subtest("write-no-prefault")
368 	run_without_prefault(fd, test_write);
369 	igt_subtest("write-gtt-no-prefault")
370 	run_without_prefault(fd, test_write_gtt);