/*
 * Copyright © 2009 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
/** @file gem_ringfill.c
 *
 * This is a test of doing many tiny batchbuffer operations, in the hope of
 * catching failure to manage the ring properly near full.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/time.h>

#include "drm.h"
#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_io.h"
#include "intel_chipset.h"
#include "igt_aux.h"
54 drm_intel_bo *src, *dst, *tmp;
57 static const int width = 512, height = 512;
59 static void create_bo(drm_intel_bufmgr *bufmgr,
63 int size = 4 * width * height, i;
67 b->src = drm_intel_bo_alloc(bufmgr, "src", size, 4096);
68 b->dst = drm_intel_bo_alloc(bufmgr, "dst", size, 4096);
69 b->tmp = drm_intel_bo_alloc(bufmgr, "tmp", size, 4096);
71 /* Fill the src with indexes of the pixels */
72 drm_intel_bo_map(b->src, true);
73 map = b->src->virtual;
74 for (i = 0; i < width * height; i++)
76 drm_intel_bo_unmap(b->src);
78 /* Fill the dst with garbage. */
79 drm_intel_bo_map(b->dst, true);
80 map = b->dst->virtual;
81 for (i = 0; i < width * height; i++)
83 drm_intel_bo_unmap(b->dst);
86 static int check_bo(struct bo *b)
91 drm_intel_bo_map(b->dst, false);
92 map = b->dst->virtual;
93 for (i = 0; i < width*height; i++) {
94 if (map[i] != i && ++fails <= 9) {
98 igt_info("%s: copy #%d at %d,%d failed: read 0x%08x\n",
99 b->ring, i, x, y, map[i]);
102 drm_intel_bo_unmap(b->dst);
107 static void destroy_bo(struct bo *b)
109 drm_intel_bo_unreference(b->src);
110 drm_intel_bo_unreference(b->tmp);
111 drm_intel_bo_unreference(b->dst);
114 static int check_ring(drm_intel_bufmgr *bufmgr,
115 struct intel_batchbuffer *batch,
117 igt_render_copyfunc_t copy)
119 struct igt_buf src, tmp, dst;
124 snprintf(output, 100, "filling %s ring: ", ring);
126 create_bo(bufmgr, &bo, ring);
128 src.stride = 4 * width;
130 src.size = 4 * width * height;
131 src.num_tiles = 4 * width * height;
138 /* The ring we've been using is 128k, and each rendering op
139 * will use at least 8 dwords:
145 * STORE_DATA_INDEX offset
146 * STORE_DATA_INDEX value
150 * So iterate just a little more than that -- if we don't fill the ring
151 * doing this, we aren't likely to with this test.
153 for (i = 0; i < width * height; i++) {
157 igt_progress(output, i, width*height);
159 igt_assert(y < height);
161 /* Dummy load to fill the ring */
162 copy(batch, NULL, &src, 0, 0, width, height, &tmp, 0, 0);
163 /* And copy the src into dst, pixel by pixel */
164 copy(batch, NULL, &src, x, y, 1, 1, &dst, x, y);
168 igt_info("verifying\n");
175 static void blt_copy(struct intel_batchbuffer *batch,
176 drm_intel_context *context,
177 struct igt_buf *src, unsigned src_x, unsigned src_y,
178 unsigned w, unsigned h,
179 struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
181 BLIT_COPY_BATCH_START(batch->devid, 0);
182 OUT_BATCH((3 << 24) | /* 32 bits */
183 (0xcc << 16) | /* copy ROP */
185 OUT_BATCH((dst_y << 16) | dst_x); /* dst x1,y1 */
186 OUT_BATCH(((dst_y + h) << 16) | (dst_x + w)); /* dst x2,y2 */
187 OUT_RELOC(dst->bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
188 BLIT_RELOC_UDW(batch->devid);
189 OUT_BATCH((src_y << 16) | src_x); /* src x1,y1 */
190 OUT_BATCH(src->stride);
191 OUT_RELOC(src->bo, I915_GEM_DOMAIN_RENDER, 0, 0);
192 BLIT_RELOC_UDW(batch->devid);
195 intel_batchbuffer_flush(batch);
198 drm_intel_bufmgr *bufmgr;
199 struct intel_batchbuffer *batch;
204 igt_skip_on_simulation();
209 bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
210 drm_intel_bufmgr_gem_enable_reuse(bufmgr);
211 batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
214 igt_subtest("blitter")
215 check_ring(bufmgr, batch, "blt", blt_copy);
217 /* Strictly only required on architectures with a separate BLT ring,
218 * but lets stress everybody.
220 igt_subtest("render") {
221 igt_render_copyfunc_t copy;
223 copy = igt_get_render_copyfunc(batch->devid);
226 check_ring(bufmgr, batch, "render", copy);
229 igt_fork_signal_helper();
230 igt_subtest("blitter-interruptible")
231 check_ring(bufmgr, batch, "blt", blt_copy);
233 /* Strictly only required on architectures with a separate BLT ring,
234 * but lets stress everybody.
236 igt_subtest("render-interruptible") {
237 igt_render_copyfunc_t copy;
239 copy = igt_get_render_copyfunc(batch->devid);
242 check_ring(bufmgr, batch, "render", copy);
244 igt_stop_signal_helper();
247 intel_batchbuffer_free(batch);
248 drm_intel_bufmgr_destroy(bufmgr);