lib/igt_kms: Unify pipe name helpers
[platform/upstream/intel-gpu-tools.git] / tests / gem_ring_sync_copy.c
1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Damien Lespiau <damien.lespiau@intel.com>
25  */
26
27 /*
28  * The goal of this test is to ensure that we respect inter ring dependencies
29  *
30  * For each pair of rings R1, R2 where we have copy support (i.e. blt,
31  * rendercpy and mediafill) do:
32  *  - Throw a busy load onto R1. gem_concurrent_blt just uses lots of buffers
33  *    for this effect.
34  *  - Fill three buffers A, B, C with unique data.
35  *  - Copy A to B on ring R1
36  *
37  * Then come the three different variants.
38  *  - Copy B to C on ring R2, check that C now contains what A originally
39  *    contained. This is the write->read hazard. gem_concurrent_blt calls this
40  *    early read.
41  *  - Copy C to A on ring R2, check that B now contains what A originally
42  *    contained. This is the read->write hazard, gem_concurrent_blt calls it
43  *    overwrite_source.
44  *  - Copy C to B on ring R2 and check that B contains what C originally
45  *    contained. This is the write/write hazard. gem_concurrent_blt doesn't
46  *    have that since for the cpu case it's too boring.
47  *
48  */
49
50 #include <stdlib.h>
51 #include <stdbool.h>
52
53 #include "ioctl_wrappers.h"
54 #include "drmtest.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_chipset.h"
57
58 #define WIDTH   512
59 #define HEIGHT  512
60
typedef struct {
	int drm_fd;		/* DRM file descriptor (render node) */
	uint32_t devid;		/* PCI device id; selects the render-copy vfunc */
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;

	/* number of buffers to keep the ring busy for a while */
	unsigned int n_buffers_load;

	/* CPU staging buffer shared by bo_create()/bo_check() for gem_write/gem_read */
	uint32_t linear[WIDTH * HEIGHT];

	/* render-ring busy-load state; arrays sized n_buffers_load (see render_busy()) */
	struct {
		igt_render_copyfunc_t copy;
		struct igt_buf *srcs;
		struct igt_buf *dsts;
	} render;

	/* blitter-ring busy-load state; arrays sized n_buffers_load (see blitter_busy()) */
	struct {
		drm_intel_bo **srcs;
		drm_intel_bo **dsts;
	} blitter;

} data_t;
84
/* Rings with copy support; values index the ops[] vfunc table. */
enum ring {
	RENDER,
	BLITTER,
};

enum test {
	TEST_WRITE_READ,	/* write->read hazard */
	TEST_READ_WRITE,	/* read->write hazard */
	TEST_WRITE_WRITE,	/* write->write hazard */
};

/*
 * Return the human-readable name of @ring, used to build subtest names.
 *
 * The table is static const and uses designated initializers so it is
 * built once and stays in sync with enum ring explicitly, instead of
 * relying on positional order in a per-call local array.
 */
static const char *ring_name(enum ring ring)
{
	static const char * const names[] = {
		[RENDER]  = "render",
		[BLITTER] = "blitter",
	};

	return names[ring];
}
105
106 static drm_intel_bo *bo_create(data_t *data, int width, int height, int val)
107 {
108         drm_intel_bo *bo;
109         int i;
110
111         bo = drm_intel_bo_alloc(data->bufmgr, "", 4 * width * height, 4096);
112         igt_assert(bo);
113
114         for (i = 0; i < width * height; i++)
115                 data->linear[i] = val;
116         gem_write(data->drm_fd, bo->handle, 0, data->linear,
117                   sizeof(data->linear));
118
119         return bo;
120 }
121
122 static void bo_check(data_t *data, drm_intel_bo *bo, uint32_t val)
123 {
124         int i;
125
126         gem_read(data->drm_fd, bo->handle, 0,
127                  data->linear, sizeof(data->linear));
128         for (i = 0; i < WIDTH * HEIGHT; i++)
129                 igt_assert_cmpint(data->linear[i], ==, val);
130 }
131
132 static void scratch_buf_init_from_bo(struct igt_buf *buf, drm_intel_bo *bo)
133 {
134         buf->bo = bo;
135         buf->stride = 4 * WIDTH;
136         buf->tiling = I915_TILING_NONE;
137         buf->size = 4 * WIDTH * HEIGHT;
138 }
139
140 static void scratch_buf_init(data_t *data, struct igt_buf *buf,
141                              int width, int height, uint32_t color)
142 {
143         drm_intel_bo *bo;
144
145         bo = bo_create(data, width, height, color);
146         scratch_buf_init_from_bo(buf, bo);
147 }
148
149 /*
150  * Provide a few ring specific vfuncs for run_test().
151  *
152  * busy()       Queue a n_buffers_load workloads onto the ring to keep it busy
153  * busy_fini()  Clean up after busy
154  * copy()       Copy one BO to another
155  */
156
157 /*
158  * Render ring
159  */
160
161 static void render_busy(data_t *data)
162 {
163         size_t array_size;
164         int i;
165
166         array_size = data->n_buffers_load * sizeof(struct igt_buf);
167         data->render.srcs = malloc(array_size);
168         data->render.dsts = malloc(array_size);
169
170         for (i = 0; i < data->n_buffers_load; i++) {
171                 scratch_buf_init(data, &data->render.srcs[i], WIDTH, HEIGHT,
172                                  0xdeadbeef);
173                 scratch_buf_init(data, &data->render.dsts[i], WIDTH, HEIGHT,
174                                  0xdeadbeef);
175         }
176
177         for (i = 0; i < data->n_buffers_load; i++) {
178                 data->render.copy(data->batch,
179                                   NULL,                 /* context */
180                                   &data->render.srcs[i],
181                                   0, 0,                 /* src_x, src_y */
182                                   WIDTH, HEIGHT,
183                                   &data->render.dsts[i],
184                                   0, 0                  /* dst_x, dst_y */);
185         }
186 }
187
188 static void render_busy_fini(data_t *data)
189 {
190         int i;
191
192         for (i = 0; i < data->n_buffers_load; i++) {
193                 drm_intel_bo_unreference(data->render.srcs[i].bo);
194                 drm_intel_bo_unreference(data->render.dsts[i].bo);
195         }
196
197         free(data->render.srcs);
198         free(data->render.dsts);
199         data->render.srcs = NULL;
200         data->render.dsts = NULL;
201 }
202
203 static void render_copy(data_t *data, drm_intel_bo *src, drm_intel_bo *dst)
204 {
205         struct igt_buf src_buf, dst_buf;
206
207         scratch_buf_init_from_bo(&src_buf, src);
208         scratch_buf_init_from_bo(&dst_buf, dst);
209
210         data->render.copy(data->batch,
211                           NULL,                 /* context */
212                           &src_buf,
213                           0, 0,                 /* src_x, src_y */
214                           WIDTH, HEIGHT,
215                           &dst_buf,
216                           0, 0                  /* dst_x, dst_y */);
217 }
218
219 /*
220  * Blitter ring
221  */
222
223 static void blitter_busy(data_t *data)
224 {
225         size_t array_size;
226         int i;
227
228         array_size = data->n_buffers_load * sizeof(drm_intel_bo *);
229         data->blitter.srcs = malloc(array_size);
230         data->blitter.dsts = malloc(array_size);
231
232         for (i = 0; i < data->n_buffers_load; i++) {
233                 data->blitter.srcs[i] = bo_create(data,
234                                                   WIDTH, HEIGHT,
235                                                   0xdeadbeef);
236                 data->blitter.dsts[i] = bo_create(data,
237                                                   WIDTH, HEIGHT,
238                                                   0xdeadbeef);
239         }
240
241         for (i = 0; i < data->n_buffers_load; i++) {
242                 intel_copy_bo(data->batch,
243                               data->blitter.srcs[i],
244                               data->blitter.dsts[i],
245                               WIDTH*HEIGHT*4);
246         }
247 }
248
249 static void blitter_busy_fini(data_t *data)
250 {
251         int i;
252
253         for (i = 0; i < data->n_buffers_load; i++) {
254                 drm_intel_bo_unreference(data->blitter.srcs[i]);
255                 drm_intel_bo_unreference(data->blitter.dsts[i]);
256         }
257
258         free(data->blitter.srcs);
259         free(data->blitter.dsts);
260         data->blitter.srcs = NULL;
261         data->blitter.dsts = NULL;
262 }
263
/*
 * Copy @src into @dst on the blitter ring.
 *
 * NOTE(review): the arguments are swapped on purpose — intel_copy_bo()
 * appears to take the destination BO before the source, the opposite of
 * this wrapper's (src, dst) order. Confirm against intel_batchbuffer.h
 * (blitter_busy() passes srcs/dsts in declaration order, but there both
 * buffers hold the same 0xdeadbeef fill, so it cannot disambiguate).
 */
static void blitter_copy(data_t *data, drm_intel_bo *src, drm_intel_bo *dst)
{
	intel_copy_bo(data->batch, dst, src, WIDTH*HEIGHT*4);
}
268
269 struct ring_ops {
270         void (*busy)(data_t *data);
271         void (*busy_fini)(data_t *data);
272         void (*copy)(data_t *data, drm_intel_bo *src, drm_intel_bo *dst);
273 } ops [] = {
274         {
275                 .busy      = render_busy,
276                 .busy_fini = render_busy_fini,
277                 .copy      = render_copy,
278         },
279         {
280                 .busy      = blitter_busy,
281                 .busy_fini = blitter_busy_fini,
282                 .copy      = blitter_copy,
283         },
284 };
285
286 static void run_test(data_t *data, enum ring r1, enum ring r2, enum test test)
287 {
288         struct ring_ops *r1_ops = &ops[r1];
289         struct ring_ops *r2_ops = &ops[r2];
290         drm_intel_bo *a, *b, *c;
291
292         a = bo_create(data, WIDTH, HEIGHT, 0xa);
293         b = bo_create(data, WIDTH, HEIGHT, 0xb);
294         c = bo_create(data, WIDTH, HEIGHT, 0xc);
295
296         r1_ops->busy(data);
297         r1_ops->copy(data, a, b);
298
299         switch (test) {
300         case TEST_WRITE_READ:
301                 r2_ops->copy(data, b, c);
302                 bo_check(data, c, 0xa);
303                 break;
304         case TEST_READ_WRITE:
305                 r2_ops->copy(data, c, a);
306                 bo_check(data, b, 0xa);
307                 break;
308         case TEST_WRITE_WRITE:
309                 r2_ops->copy(data, c, b);
310                 bo_check(data, b, 0xc);
311                 break;
312         default:
313                 igt_fail(1);
314         }
315
316         r1_ops->busy_fini(data);
317 }
318
igt_main
{
	data_t data = {0, };
	int i;
	/* each pair is exercised in both orders (R1 busy/copy, R2 check) */
	struct combination {
		int r1, r2;
	} ring_combinations [] = {
		{ RENDER, BLITTER },
		{ BLITTER, RENDER },
	};

	/* one-time setup: open the device and build the shared state */
	igt_fixture {
		data.drm_fd = drm_open_any_render();
		data.devid = intel_get_drm_devid(data.drm_fd);

		/* number of buffers to keep each ring busy for a while */
		data.n_buffers_load = 1000;

		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
		igt_assert(data.bufmgr);
		drm_intel_bufmgr_gem_enable_reuse(data.bufmgr);

		/* skip (rather than fail) on hw without a render-copy impl */
		data.render.copy = igt_get_render_copyfunc(data.devid);
		igt_require_f(data.render.copy,
			      "no render-copy function\n");

		data.batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
		igt_assert(data.batch);
	}

	/* one subtest per (ring pair, hazard) combination */
	for (i = 0; i < ARRAY_SIZE(ring_combinations); i++) {
		struct combination *c = &ring_combinations[i];

		igt_subtest_f("sync-%s-%s-write-read",
			      ring_name(c->r1), ring_name(c->r2))
			run_test(&data, c->r1, c->r2, TEST_WRITE_READ);

		igt_subtest_f("sync-%s-%s-read-write",
			      ring_name(c->r1), ring_name(c->r2))
			run_test(&data, c->r1, c->r2, TEST_READ_WRITE);
		igt_subtest_f("sync-%s-%s-write-write",
			      ring_name(c->r1), ring_name(c->r2))
			run_test(&data, c->r1, c->r2, TEST_WRITE_WRITE);
	}

	igt_fixture {
		intel_batchbuffer_free(data.batch);
		drm_intel_bufmgr_destroy(data.bufmgr);
		close(data.drm_fd);
	}
}