2 * Copyright © 2011,2013 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
42 #include "ioctl_wrappers.h"
44 #include "intel_chipset.h"
46 #include "igt_debugfs.h"
50 * Testcase: Kernel relocations vs. gpu races
/* Shared test state; re-created per process in the forked subtests. */
54 static drm_intel_bufmgr *bufmgr;
55 struct intel_batchbuffer *batch;
/* Backing data uploaded into dummy_bo before the blit storm. */
57 uint32_t blob[2048*2048];
58 #define NUM_TARGET_BOS 16
/* Reloc targets; each first dword is checked for 0xdeadbeef at the end. */
59 drm_intel_bo *pc_target_bo[NUM_TARGET_BOS];
/* Large tiled bo blitted onto itself repeatedly to keep the GPU busy. */
60 drm_intel_bo *dummy_bo;
/* Small hand-assembled batch containing the relocation under test. */
61 drm_intel_bo *special_bo;
/* Byte offset of the reloc slot inside special_bo, and its batch length. */
63 int special_reloc_ofs;
64 int special_batch_len;
/*
 * Build "special_bo": a 4096-byte batch holding a single XY_COLOR_BLT
 * whose destination-address dword is the relocation under test.  Saves
 * the byte offset of that reloc slot in special_reloc_ofs and the total
 * batch length in special_batch_len for the submission paths below.
 * NOTE(review): several original lines (local declarations, braces and
 * the fill-colour dword) are not visible in this excerpt.
 */
66 static void create_special_bo(void)
/* Append one dword to the batch being assembled in data[]. */
71 #define BATCH(dw) data[len++] = (dw);
73 memset(data, 0, 4096);
74 special_bo = drm_intel_bo_alloc(bufmgr, "special batch", 4096, 4096);
/* gen8+ blit commands carry a 64-bit address, so the length field grows. */
76 if (intel_gen(devid) >= 8) {
78 BATCH(XY_COLOR_BLT_CMD_NOLEN | 5 |
79 COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
81 BATCH(XY_COLOR_BLT_CMD_NOLEN | 4 |
82 COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
/* 32bpp | raster op 0xf0 | destination pitch. */
85 BATCH((3 << 24) | (0xf0 << 16) | small_pitch);
/* The next dword is the destination address: record its byte offset. */
88 special_reloc_ofs = 4*len;
/* gen8+ reserves a second dword for the upper 32 address bits. */
90 if (intel_gen(devid) >= 8)
94 #define CMD_POLY_STIPPLE_OFFSET 0x7906
/* NOTE(review): gen5-only extra command — presumably a required
 * workaround on ILK; confirm against the full source. */
96 if (IS_GEN5(batch->devid)) {
97 BATCH(CMD_POLY_STIPPLE_OFFSET << 16);
/* Batches must be an even number of dwords (qword aligned). */
100 igt_assert(len % 2 == 0);
102 BATCH(MI_BATCH_BUFFER_END);
104 drm_intel_bo_subdata(special_bo, 0, 4096, data);
105 special_batch_len = len*4;
/*
 * Queue ten large (2048x2048, 32bpp) self-copy blits on dummy_bo so the
 * blitter stays busy; the relocations submitted afterwards then race
 * against this in-flight GPU work.
 * @pitch: pitch of the tiled dummy bo, as returned by its allocation.
 */
108 static void emit_dummy_load(int pitch)
111 uint32_t tile_flags = 0;
/* Tiled bos need the tiled src/dst variants of the blit command bits. */
115 tile_flags = XY_SRC_COPY_BLT_SRC_TILED |
116 XY_SRC_COPY_BLT_DST_TILED;
119 for (i = 0; i < 10; i++) {
120 BLIT_COPY_BATCH_START(devid, tile_flags);
121 OUT_BATCH((3 << 24) | /* 32 bits */
122 (0xcc << 16) | /* copy ROP */
124 OUT_BATCH(0 << 16 | 1024);
125 OUT_BATCH((2048) << 16 | (2048));
/* dummy_bo as blit destination (written, fenced)... */
126 OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
127 BLIT_RELOC_UDW(devid);
128 OUT_BATCH(0 << 16 | 0);
/* ...and as blit source (read-only). */
130 OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
131 BLIT_RELOC_UDW(devid);
/* NOTE(review): the purpose of the trailing XY_SETUP_CLIP_BLT on gen6+
 * is not evident from this excerpt — confirm against the full source. */
134 if (intel_gen(devid) >= 6) {
136 OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
142 intel_batchbuffer_flush(batch);
/*
 * Submit special_bo via a raw execbuf2 whose relocation array lives in a
 * freshly GTT-mmapped bo: the kernel's read of the relocs then faults on
 * first access (see comment below), exercising the reloc fault-handling
 * path while the GPU is still chewing on the dummy load.
 * NOTE(review): declarations of ring/gtt_relocs and the memsets of
 * exec/reloc/execbuf are not visible in this excerpt.
 */
145 static void faulting_reloc_and_emit(int fd, drm_intel_bo *target_bo)
147 struct drm_i915_gem_execbuffer2 execbuf;
148 struct drm_i915_gem_exec_object2 exec[2];
149 struct drm_i915_gem_relocation_entry reloc[1];
150 uint32_t handle_relocs;
/* gen6+ has a separate blitter ring. */
154 if (intel_gen(devid) >= 6)
155 ring = I915_EXEC_BLT;
/* exec[0]: the reloc target itself, with no relocations of its own. */
159 exec[0].handle = target_bo->handle;
160 exec[0].relocation_count = 0;
161 exec[0].relocs_ptr = 0;
162 exec[0].alignment = 0;
/* One reloc patching the destination-address slot inside special_bo. */
168 reloc[0].offset = special_reloc_ofs;
170 reloc[0].target_handle = target_bo->handle;
171 reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
172 reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
/* presumed_offset 0: presumably forces the kernel to actually process
 * the relocation — confirm. */
173 reloc[0].presumed_offset = 0;
/* Stage the reloc array in a bo and hand the kernel its GTT mapping. */
175 handle_relocs = gem_create(fd, 4096);
176 gem_write(fd, handle_relocs, 0, reloc, sizeof(reloc));
177 gtt_relocs = gem_mmap(fd, handle_relocs, 4096,
178 PROT_READ | PROT_WRITE);
179 igt_assert(gtt_relocs);
/* exec[1]: the batch buffer, last in the list as execbuf2 requires. */
181 exec[1].handle = special_bo->handle;
182 exec[1].relocation_count = 1;
183 /* A newly mmap gtt bo will fault on first access. */
184 exec[1].relocs_ptr = (uintptr_t)gtt_relocs;
185 exec[1].alignment = 0;
191 execbuf.buffers_ptr = (uintptr_t)exec;
192 execbuf.buffer_count = 2;
193 execbuf.batch_start_offset = 0;
194 execbuf.batch_len = special_batch_len;
195 execbuf.cliprects_ptr = 0;
196 execbuf.num_cliprects = 0;
199 execbuf.flags = ring;
200 i915_execbuffer2_set_context_id(execbuf, 0);
203 gem_execbuf(fd, &execbuf);
205 gem_close(fd, handle_relocs);
/*
 * Non-faulting variant: record the relocation through libdrm and submit
 * special_bo via the normal drm_intel_bo_mrb_exec path.
 */
208 static void reloc_and_emit(int fd, drm_intel_bo *target_bo)
/* gen6+ has a separate blitter ring. */
212 if (intel_gen(devid) >= 6)
213 ring = I915_EXEC_BLT;
/* Patch the destination-address slot to point at target_bo. */
217 drm_intel_bo_emit_reloc(special_bo, special_reloc_ofs,
220 I915_GEM_DOMAIN_RENDER,
221 I915_GEM_DOMAIN_RENDER);
222 drm_intel_bo_mrb_exec(special_bo, special_batch_len, NULL,
/*
 * Core test: for each of NUM_TARGET_BOS targets, saturate the GPU with
 * dummy blits and then submit the special batch — via the raw faulting
 * execbuf path when faulting_reloc is set, otherwise via libdrm.  At the
 * end every target's first dword must read 0xdeadbeef, proving the
 * relocation was applied correctly despite the concurrent GPU work.
 */
227 static void do_test(int fd, bool faulting_reloc)
229 uint32_t tiling_mode = I915_TILING_X;
230 unsigned long pitch, act_size;
/* Disable kernel prefaulting so the faulting-reloc path really faults. */
235 igt_disable_prefault();
238 dummy_bo = drm_intel_bo_alloc_tiled(bufmgr, "tiled dummy_bo", act_size, act_size,
239 4, &tiling_mode, &pitch, 0);
241 drm_intel_bo_subdata(dummy_bo, 0, act_size*act_size*4, blob);
245 for (i = 0; i < NUM_TARGET_BOS; i++) {
246 pc_target_bo[i] = drm_intel_bo_alloc(bufmgr, "special batch", 4096, 4096);
247 emit_dummy_load(pitch);
/* Fresh bo: presumably not yet bound, so the reloc is non-trivial. */
248 igt_assert(pc_target_bo[i]->offset == 0);
251 faulting_reloc_and_emit(fd, pc_target_bo[i]);
253 reloc_and_emit(fd, pc_target_bo[i]);
256 /* Only check at the end to avoid unnecessary synchronous behaviour. */
257 for (i = 0; i < NUM_TARGET_BOS; i++) {
258 drm_intel_bo_get_subdata(pc_target_bo[i], 0, 4, &test);
259 igt_assert_f(test == 0xdeadbeef,
260 "mismatch in buffer %i: 0x%08x instead of 0xdeadbeef\n", i, test);
261 drm_intel_bo_unreference(pc_target_bo[i]);
/* NOTE(review): GTT map/unmap cycle before freeing dummy_bo — presumably
 * synchronizes with the outstanding blits; confirm against full source. */
264 drm_intel_gem_bo_map_gtt(dummy_bo);
265 drm_intel_gem_bo_unmap_gtt(dummy_bo);
267 drm_intel_bo_unreference(special_bo);
268 drm_intel_bo_unreference(dummy_bo);
271 igt_enable_prefault();
274 #define INTERRUPT (1 << 0)
275 #define FAULTING (1 << 1)
276 #define THRASH (1 << 2)
277 #define THRASH_INACTIVE (1 << 3)
278 #define ALL_FLAGS (INTERRUPT | FAULTING | THRASH | THRASH_INACTIVE)
/*
 * Run do_test concurrently in 4*nproc forked children, optionally while
 * a helper process thrashes kernel caches (THRASH/THRASH_INACTIVE) and
 * while a signal helper interrupts syscalls (INTERRUPT).
 */
279 static void do_forked_test(int fd, unsigned flags)
281 int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
282 struct igt_helper_process thrasher = {};
284 if (flags & (THRASH | THRASH_INACTIVE)) {
/* THRASH_INACTIVE drops only idle state; THRASH drops everything. */
285 uint64_t val = (flags & THRASH_INACTIVE) ?
286 (DROP_RETIRE | DROP_BOUND | DROP_UNBOUND) : DROP_ALL;
288 igt_fork_helper(&thrasher) {
291 igt_drop_caches_set(val);
296 igt_fork(i, num_threads * 4) {
297 /* re-create process local data */
298 bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
299 batch = intel_batchbuffer_alloc(bufmgr, devid);
301 if (flags & INTERRUPT)
302 igt_fork_signal_helper();
304 do_test(fd, flags & FAULTING);
306 if (flags & INTERRUPT)
307 igt_stop_signal_helper();
311 if (flags & (THRASH | THRASH_INACTIVE))
312 igt_stop_helper(&thrasher);
/* Test entry point (igt_main body; its opening lines are not visible in
 * this excerpt).  Sets up the bufmgr/batch, then registers the plain,
 * faulting and interruptible subtests plus every valid forked flag
 * combination. */
317 #define MAX_BLT_SIZE 128
320 igt_skip_on_simulation();
/* Recognisable fill pattern for the dummy bo contents. */
322 memset(blob, 'A', sizeof(blob));
327 bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
328 /* disable reuse, otherwise the test fails */
329 //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
330 devid = intel_get_drm_devid(fd);
331 batch = intel_batchbuffer_alloc(bufmgr, devid);
334 igt_subtest("normal")
337 igt_subtest("faulting-reloc")
/* Interruptible variants run with the signal helper active. */
340 igt_fork_signal_helper();
341 igt_subtest("interruptible")
344 igt_subtest("faulting-reloc-interruptible")
346 igt_stop_signal_helper();
/* Enumerate all flag combinations; THRASH and THRASH_INACTIVE are
 * mutually exclusive, so that pairing is skipped. */
348 for (unsigned flags = 0; flags <= ALL_FLAGS; flags++) {
349 if ((flags & THRASH) && (flags & THRASH_INACTIVE))
352 igt_subtest_f("forked%s%s%s%s",
353 flags & INTERRUPT ? "-interruptible" : "",
354 flags & FAULTING ? "-faulting-reloc" : "",
355 flags & THRASH ? "-thrashing" : "",
356 flags & THRASH_INACTIVE ? "-thrash-inactive" : "")
357 do_forked_test(fd, flags);
361 intel_batchbuffer_free(batch);
362 drm_intel_bufmgr_destroy(bufmgr);