2 * Copyright © 2013 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
39 #include "ioctl_wrappers.h"
41 #include "intel_chipset.h"
46 #define LOCAL_I915_EXEC_VEBOX (4<<0)
/* libdrm buffer manager shared by all subtests; buffer reuse is deliberately
 * left disabled at init time (see the commented-out enable_reuse call below). */
48 static drm_intel_bufmgr *bufmgr;
/* Batchbuffer used for all emitted blits/flushes.
 * NOTE(review): not declared static like its neighbours — presumably an
 * oversight rather than an exported symbol; confirm no external users. */
49 struct intel_batchbuffer *batch;
/* target_bo: small (4 KiB) buffer whose first dword is written by the GPU and
 *            then read back on the CPU to check the syncpoint;
 * load_bo:   large buffer blitted onto itself to keep the GPU busy;
 * dummy_bo:  allocated in the fixture but not referenced in the visible code —
 *            presumably padding/aperture filler; TODO confirm. */
50 static drm_intel_bo *load_bo, *target_bo, *dummy_bo;
53 /* Testcase: check read/write syncpoints when switching rings
55 * We've had a bug where the syncpoint for the last write was mangled after a
56 * ring switch using semaphores. This resulted in cpu reads returning before the
57 * write actually completed. This test exercises this.
60 #define COLOR 0xffffffff
/*
 * run_test - exercise the write/read syncpoint across a blt -> @ring switch.
 *
 * @ring: execbuf ring flag (I915_EXEC_RENDER/BSD/VEBOX) used for the read
 *        side of the test; the write side always uses the blitter.
 *
 * Sequence (visible in this view): fill target_bo with COLOR on the blitter
 * while the GPU is loaded, emit a read-only access to target_bo on @ring,
 * then assert a CPU read through the mapping sees the completed write.
 * NOTE(review): this extraction is missing lines (locals fd/ptr/i, braces);
 * comments below annotate only what the visible statements establish.
 */
61 static void run_test(int ring)
	/* Skip unless the requested ring exists on this device. */
66 	gem_require_ring(fd, ring);
67 	/* Testing render only makes sense with separate blt. */
68 	if (ring == I915_EXEC_RENDER)
69 		gem_require_ring(fd, I915_EXEC_BLT);
71 	target_bo = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
72 	igt_assert(target_bo);
74 	/* Need to map first so that we can do our own domain mangement with
	 * (comment truncated by extraction — presumably continues: "...explicit
	 * set_domain calls"; mapping before any GPU use keeps libdrm out of the
	 * domain tracking we want to exercise). */
76 	drm_intel_bo_map(target_bo, 0);
77 	ptr = target_bo->virtual;
	/* Fresh bo must read back as zero before the GPU writes COLOR into it. */
78 	igt_assert(*ptr == 0);
80 	/* put some load onto the gpu to keep the light buffers active for long
	 * (comment truncated by extraction). 1000 self-blits on load_bo. */
82 	for (i = 0; i < 1000; i++) {
83 		BLIT_COPY_BATCH_START(batch->devid, 0);
84 		OUT_BATCH((3 << 24) | /* 32 bits */
85 			  (0xcc << 16) | /* copy ROP */
		/* (pitch dword missing from this view) */
87 		OUT_BATCH(0); /* dst x1,y1 */
88 		OUT_BATCH((1024 << 16) | 512);
89 		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
90 		BLIT_RELOC_UDW(batch->devid);
91 		OUT_BATCH((0 << 16) | 512); /* src x1, y1 */
93 		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
94 		BLIT_RELOC_UDW(batch->devid);
	/* Color-fill the first pixel of target_bo with COLOR on the blitter;
	 * this is the write whose syncpoint the test later checks. */
98 	COLOR_BLIT_COPY_BATCH_START(batch->devid, 0);
99 	OUT_BATCH((3 << 24) | /* 32 bits */
102 	OUT_BATCH(0); /* dst x1,y1 */
103 	OUT_BATCH((1 << 16) | 1);
104 	OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
105 	BLIT_RELOC_UDW(batch->devid);
109 	intel_batchbuffer_flush(batch);
111 	/* Emit an empty batch so that signalled seqno on the target ring >
112 	 * signalled seqnoe on the blt ring. This is required to hit the bug. */
117 	intel_batchbuffer_flush_on_ring(batch, ring);
119 	/* For the ring->ring sync it's important to only emit a read reloc, for
120 	 * otherwise the obj->last_write_seqno will be updated. */
121 	if (ring == I915_EXEC_RENDER) {
		/* Render ring: conditional batch-end reads target_bo (read reloc only). */
123 		OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
124 		OUT_BATCH(0xffffffff); /* compare dword */
125 		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		/* Non-render rings: MI_FLUSH_DW with a post-sync read reloc instead. */
130 		OUT_BATCH(MI_FLUSH_DW | 1);
131 		OUT_BATCH(0); /* reserved */
132 		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
133 		OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
136 	intel_batchbuffer_flush_on_ring(batch, ring);
	/* Wait for GTT-domain coherency, then check the CPU sees the blitter's
	 * write — the bug under test made this read return stale zeroes. */
138 	gem_set_domain(fd, target_bo->handle, I915_GEM_DOMAIN_GTT, 0);
139 	igt_assert(*ptr == COLOR);
140 	drm_intel_bo_unmap(target_bo);
142 	drm_intel_bo_unreference(target_bo);
/* Subtest table: each entry names a blt -> <ring> switch scenario and the
 * execbuf ring flag passed to run_test(). (Struct member declarations are
 * missing from this extraction — presumably { const char *name; int ring; }.) */
static const struct {
151 	{ "blt2render", I915_EXEC_RENDER },
152 	{ "blt2bsd", I915_EXEC_BSD },
153 	{ "blt2vebox", LOCAL_I915_EXEC_VEBOX },
	/* Interior of igt_main (header outside this view): fixture setup,
	 * plain subtests, then signal-interrupted variants, then teardown. */
157 	igt_skip_on_simulation();
162 	/* Test requires MI_FLUSH_DW and MI_COND_BATCH_BUFFER_END */
163 	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 6);
165 	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
167 	/* don't enable buffer reuse!! */
168 	//drm_intel_bufmgr_gem_enable_reuse(bufmgr);
170 	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
	/* dummy_bo is allocated but not used in the visible code — TODO confirm
	 * its purpose (likely keeps an extra object live across execbufs). */
173 	dummy_bo = drm_intel_bo_alloc(bufmgr, "dummy bo", 4096, 4096);
174 	igt_assert(dummy_bo);
	/* 4 MiB load buffer for the self-blit GPU-load loop in run_test().
	 * NOTE(review): allocation result is not igt_assert()ed here, unlike
	 * dummy_bo above — presumably an omission in the original. */
176 	load_bo = drm_intel_bo_alloc(bufmgr, "load bo", 1024*4096, 4096);
	/* One plain subtest per table entry. */
180 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
181 		igt_subtest(tests[i].name)
182 			run_test(tests[i].ring);
	/* Repeat every subtest while a helper process sends signals, to
	 * exercise the interruptible (-EINTR/restart) ioctl paths. */
185 	igt_fork_signal_helper();
186 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
187 		igt_subtest_f("%s-interruptible", tests[i].name)
188 			run_test(tests[i].ring);
190 	igt_stop_signal_helper();
193 	drm_intel_bufmgr_destroy(bufmgr);