/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 */
/** @file gem_unfence_active_buffers.c
 *
 * Testcase: Check for use-after free in the fence stealing code
 *
 * If we're stealing the fence of a active object where the active list is the
 * only thing holding a reference, we need to be careful not to access the old
 * object we're stealing the fence from after that reference has been dropped
 * by the retire list processing.
 *
 * Note that this needs slab poisoning enabled in the kernel to reliably hit the
 * problem - the race window is too small.
 */
#include "ioctl_wrappers.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_chipset.h"
58 static drm_intel_bufmgr *bufmgr;
59 struct intel_batchbuffer *batch;
/* Size (in bytes) and X-tiled stride of each throw-away test bo.
 * (De-garbled: the extraction had fused original line numbers into
 * these lines.) */
#define TEST_SIZE (1024*1024)
#define TEST_STRIDE (4*1024)

/* CPU staging buffer covering one full test bo, as 32-bit dwords. */
uint32_t data[TEST_SIZE/4];
/*
 * NOTE(review): visible interior of the test's main body. The extraction
 * that produced this file fused the original line numbers into every line
 * and dropped many lines outright (the enclosing function header, the drm
 * fd open, several OUT_BATCH words and all closing braces are missing).
 * The code below is therefore kept byte-identical and only annotated.
 */
69 int i, ret, fd, num_fences;
70 drm_intel_bo *busy_bo, *test_bo;
/* X-tiled, so blits touching test_bo need a fence register. */
71 uint32_t tiling = I915_TILING_X;
73 igt_skip_on_simulation();
/* NOTE(review): loop body lost — presumably initializes data[] (256K
 * dwords, matching TEST_SIZE/4); confirm against the upstream test. */
75 for (i = 0; i < 1024*256; i++)
/* NOTE(review): the drm device open was dropped here; fd is used below. */
80 bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
81 drm_intel_bufmgr_gem_enable_reuse(bufmgr);
82 devid = intel_get_drm_devid(fd);
83 batch = intel_batchbuffer_alloc(bufmgr, devid);
/* Queue 250 big self-blits on a 16MiB scratch bo so the GPU stays busy:
 * per the file header, the bug needs the active list to be the only thing
 * holding a reference to the victim bos created further down. */
85 igt_info("filling ring\n");
86 busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);
88 for (i = 0; i < 250; i++) {
89 BLIT_COPY_BATCH_START(devid, 0);
90 OUT_BATCH((3 << 24) | /* 32 bits */
91 (0xcc << 16) | /* copy ROP */
93 OUT_BATCH(0 << 16 | 1024);
94 OUT_BATCH((2048) << 16 | (2048));
95 OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
96 BLIT_RELOC_UDW(devid);
97 OUT_BATCH(0 << 16 | 0);
99 OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
100 BLIT_RELOC_UDW(devid);
/* NOTE(review): looks like a gen6/7 blitter workaround packet; the rest
 * of the dummy XY_SETUP_CLIP emission was dropped — confirm upstream. */
103 if (IS_GEN6(devid) || IS_GEN7(devid)) {
105 OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
111 intel_batchbuffer_flush(batch);
/* Cycle through 2x the available fence registers so every fence gets
 * stolen from a still-active buffer at least once. */
113 num_fences = gem_available_fences(fd);
114 igt_info("creating havoc on %i fences\n", num_fences);
116 for (i = 0; i < num_fences*2; i++) {
117 test_bo = drm_intel_bo_alloc(bufmgr, "test_bo",
119 ret = drm_intel_bo_set_tiling(test_bo, &tiling, TEST_STRIDE);
120 igt_assert(ret == 0);
/* Keep the bo out of libdrm's bo cache: the kernel must really free it
 * on the last unreference for the use-after-free to be reachable. */
122 drm_intel_bo_disable_reuse(test_bo);
/* Tiny fenced blit that makes test_bo active before it is unreferenced. */
124 BLIT_COPY_BATCH_START(devid, 0);
125 OUT_BATCH((3 << 24) | /* 32 bits */
126 (0xcc << 16) | /* copy ROP */
128 OUT_BATCH(0 << 16 | 0);
129 OUT_BATCH((1) << 16 | (1));
130 OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
131 BLIT_RELOC_UDW(devid);
132 OUT_BATCH(0 << 16 | 0);
133 OUT_BATCH(TEST_STRIDE);
134 OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
135 BLIT_RELOC_UDW(devid);
137 intel_batchbuffer_flush(batch);
138 igt_info("test bo offset: %#lx\n", test_bo->offset);
/* Drop the last userspace reference while the bo is still busy — from
 * here on only the active list keeps it alive (the setup described in
 * the file header). */
140 drm_intel_bo_unreference(test_bo);
143 /* launch a few batchs to ensure the damaged slab objects get reused. */
144 for (i = 0; i < 10; i++) {
145 BLIT_COPY_BATCH_START(devid, 0);
146 OUT_BATCH((3 << 24) | /* 32 bits */
147 (0xcc << 16) | /* copy ROP */
149 OUT_BATCH(0 << 16 | 1024);
150 OUT_BATCH((1) << 16 | (1));
151 OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
152 BLIT_RELOC_UDW(devid);
153 OUT_BATCH(0 << 16 | 0);
155 OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
156 BLIT_RELOC_UDW(devid);
159 if (IS_GEN6(devid) || IS_GEN7(devid)) {
161 OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
/* NOTE(review): the cleanup tail (bo unreference, batch free, fd close,
 * closing braces) was dropped by the extraction after this flush. */
167 intel_batchbuffer_flush(batch);