/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "drm.h"
#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_chipset.h"
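
/* igt convention: LOCAL_ defines mirror execbuf flags that may not yet be
 * available in the installed libdrm/kernel headers. */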
#define LOCAL_I915_EXEC_VEBOX (4<<0)

static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;
static drm_intel_bo *load_bo, *target_bo, *dummy_bo;
static int fd;

/* Testcase: check read/write syncpoints when switching rings
 *
 * We've had a bug where the syncpoint for the last write was mangled after a
 * ring switch using semaphores. This resulted in cpu reads returning before the
 * write actually completed. This test exercises this.
 */
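
/* Fill value the blitter writes into the target bo; the CPU readback at the
 * end of run_test() must observe exactly this value. */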
#define COLOR 0xffffffff
static void run_test(int ring)
{
	uint32_t *ptr;
	int i;

	gem_require_ring(fd, ring);
	/* Testing render only makes sense with separate blt. */
	if (ring == I915_EXEC_RENDER)
		gem_require_ring(fd, I915_EXEC_BLT);

	target_bo = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	igt_assert(target_bo);

	/* Need to map first so that we can do our own domain management with
	 * set_domain below. */
	drm_intel_bo_map(target_bo, 0);
	ptr = target_bo->virtual;
	igt_assert(*ptr == 0);

	/* put some load onto the gpu to keep the light buffers active for long
	 * enough. */
	for (i = 0; i < 1000; i++) {
		BLIT_COPY_BATCH_START(batch->devid, 0);
		OUT_BATCH((3 << 24) | /* 32 bits */
			  (0xcc << 16) | /* copy ROP */
			  4096); /* dst pitch */
		OUT_BATCH(0); /* dst x1,y1 */
		OUT_BATCH((1024 << 16) | 512); /* dst x2,y2 */
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
		BLIT_RELOC_UDW(batch->devid);
		OUT_BATCH((0 << 16) | 512); /* src x1,y1 */
		OUT_BATCH(4096); /* src pitch */
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		BLIT_RELOC_UDW(batch->devid);
		ADVANCE_BATCH();
	}

	COLOR_BLIT_COPY_BATCH_START(batch->devid, 0);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xff << 16) |
		  128); /* dst pitch */
	OUT_BATCH(0); /* dst x1,y1 */
	OUT_BATCH((1 << 16) | 1); /* dst x2,y2 */
	OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	BLIT_RELOC_UDW(batch->devid);
	OUT_BATCH(COLOR);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);

	/* Emit an empty batch so that the signalled seqno on the target ring >
	 * signalled seqno on the blt ring. This is required to hit the bug. */
	BEGIN_BATCH(2, 0);
	OUT_BATCH(MI_NOOP);
	OUT_BATCH(MI_NOOP);
	ADVANCE_BATCH();
	intel_batchbuffer_flush_on_ring(batch, ring);

	/* For the ring->ring sync it's important to only emit a read reloc, since
	 * otherwise the obj->last_write_seqno will be updated. */
	if (ring == I915_EXEC_RENDER) {
		BEGIN_BATCH(4, 1);
		OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
		OUT_BATCH(0xffffffff); /* compare dword */
		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		OUT_BATCH(MI_NOOP);
		ADVANCE_BATCH();
	} else {
		BEGIN_BATCH(4, 1);
		OUT_BATCH(MI_FLUSH_DW | 1);
		OUT_BATCH(0); /* reserved */
		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
		ADVANCE_BATCH();
	}
	intel_batchbuffer_flush_on_ring(batch, ring);
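
	/* set_domain has to stall until the fill above has actually landed in
	 * the target bo; with the broken write syncpoint it returned too early
	 * and the read below saw stale data. */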
	gem_set_domain(fd, target_bo->handle, I915_GEM_DOMAIN_GTT, 0);
	igt_assert(*ptr == COLOR);
	drm_intel_bo_unmap(target_bo);

	drm_intel_bo_unreference(target_bo);
}

static const struct {
	const char *name;
	int ring;
} tests[] = {
150 { "blt2render", I915_EXEC_RENDER },
151 { "blt2bsd", I915_EXEC_BSD },
152 { "blt2vebox", LOCAL_I915_EXEC_VEBOX },
};

igt_main
{
	int i;

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_any();

		/* Test requires MI_FLUSH_DW and MI_COND_BATCH_BUFFER_END */
		igt_require(intel_gen(intel_get_drm_devid(fd)) >= 6);

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		igt_assert(bufmgr);
		/* don't enable buffer reuse!! */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

		batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
		igt_assert(batch);

		dummy_bo = drm_intel_bo_alloc(bufmgr, "dummy bo", 4096, 4096);
		igt_assert(dummy_bo);

		load_bo = drm_intel_bo_alloc(bufmgr, "load bo", 1024*4096, 4096);
		igt_assert(load_bo);
	}
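
	/* The subtest names encode the ring switch under test: the blt ring is
	 * the write side, the named ring is the read side. */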
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		igt_subtest(tests[i].name)
			run_test(tests[i].ring);
	}

	/* Repeat with the signal helper interrupting us constantly so that the
	 * waits get restarted. */
	igt_fork_signal_helper();
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		igt_subtest_f("%s-interruptible", tests[i].name)
			run_test(tests[i].ring);
	}
	igt_stop_signal_helper();

	igt_fixture {
		drm_intel_bufmgr_destroy(bufmgr);

		close(fd);
	}
}