tests: use igt_exit() consistently with subtests
[platform/upstream/intel-gpu-tools.git] / tests / gem_tiled_partial_pwrite_pread.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_gpu_tools.h"

/*
 * Testcase: pwrite/pread consistency when touching partial cachelines
 *
 * Some fancy new pwrite/pread optimizations clflush in-line while
 * reading/writing. Check whether all required clflushes happen.
 *
 * Unfortunately really old mesa used unaligned pread/pwrite for s/w fallback
 * rendering, so we need to check whether this works on tiled buffers, too.
 *
 */

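/* State shared by all subtests. */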
static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;

drm_intel_bo *scratch_bo;
drm_intel_bo *staging_bo;
drm_intel_bo *tiled_staging_bo;
unsigned long scratch_pitch;
#define BO_SIZE (32*4096)
uint32_t devid;
uint64_t mappable_gtt_limit;
int fd;

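/* Blit a full-bo copy between two buffers. On gen4+ the BLT expects the pitch
 * of a tiled surface in dwords and needs the corresponding TILED bit set in
 * the XY_SRC_COPY_BLT command, hence the per-buffer adjustments below. */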
static void
copy_bo(drm_intel_bo *src, int src_tiled,
        drm_intel_bo *dst, int dst_tiled)
{
        unsigned long dst_pitch = scratch_pitch;
        unsigned long src_pitch = scratch_pitch;
        uint32_t cmd_bits = 0;

        /* dst is tiled ... */
        if (intel_gen(devid) >= 4 && dst_tiled) {
                dst_pitch /= 4;
                cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
        }

        /* ... and src is tiled */
        if (intel_gen(devid) >= 4 && src_tiled) {
                src_pitch /= 4;
                cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
        }

        BEGIN_BATCH(8);
        OUT_BATCH(XY_SRC_COPY_BLT_CMD |
                  XY_SRC_COPY_BLT_WRITE_ALPHA |
                  XY_SRC_COPY_BLT_WRITE_RGB |
                  cmd_bits);
        OUT_BATCH((3 << 24) | /* 32 bits */
                  (0xcc << 16) | /* copy ROP */
                  dst_pitch);
        OUT_BATCH(0 << 16 | 0);
        OUT_BATCH(BO_SIZE/scratch_pitch << 16 | 1024);
        OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
        OUT_BATCH(0 << 16 | 0);
        OUT_BATCH(src_pitch);
        OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
        ADVANCE_BATCH();

        intel_batchbuffer_flush(batch);
}

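/* Fill bo with the byte val: write the pattern through a GTT mapping of the
 * linear tmp_bo and blit it into the (tiled) bo. If bo currently sits in the
 * mappable aperture, trash the aperture first (presumably to keep the later
 * preads/pwrites off the GTT fast path). */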
static void
blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, int val)
{
        uint8_t *gtt_ptr;
        int i;

        drm_intel_gem_bo_map_gtt(tmp_bo);
        gtt_ptr = tmp_bo->virtual;

        for (i = 0; i < BO_SIZE; i++)
                gtt_ptr[i] = val;

        drm_intel_gem_bo_unmap_gtt(tmp_bo);

        if (bo->offset < mappable_gtt_limit &&
            (IS_G33(devid) || intel_gen(devid) >= 4))
                igt_trash_aperture();

        copy_bo(tmp_bo, 0, bo, 1);
}

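/* CPU-side scratch buffers used as the pwrite source and pread destination. */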
#define MAX_BLT_SIZE 128
#define ROUNDS 200
uint8_t tmp[BO_SIZE];
uint8_t compare_tmp[BO_SIZE];

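/* Fill the tiled scratch bo via the blitter, then pread a random sub-range
 * and check that every byte matches the fill value. */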
static void test_partial_reads(void)
{
        int i, j;

        printf("checking partial reads\n");
        for (i = 0; i < ROUNDS; i++) {
                int start, len;
                int val = i % 256;

                blt_bo_fill(staging_bo, scratch_bo, i);

                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
                for (j = 0; j < len; j++) {
                        if (tmp[j] != val) {
                                printf("mismatch at %i, got: %i, expected: %i\n",
                                       start + j, tmp[j], val);
                                igt_fail(1);
                        }
                }

                igt_progress("partial reads test: ", i, ROUNDS);
        }
}

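/* Fill the tiled scratch bo via the blitter, pwrite a random sub-range with a
 * different value, then blit into the tiled staging bo and read everything
 * back for verification. */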
static void test_partial_writes(void)
{
        int i, j;

        printf("checking partial writes\n");
        for (i = 0; i < ROUNDS; i++) {
                int start, len;
                int val = i % 256;

                blt_bo_fill(staging_bo, scratch_bo, i);

                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                memset(tmp, i + 63, BO_SIZE);

                drm_intel_bo_subdata(scratch_bo, start, len, tmp);

                copy_bo(scratch_bo, 1, tiled_staging_bo, 1);
                drm_intel_bo_get_subdata(tiled_staging_bo, 0, BO_SIZE,
                                         compare_tmp);

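                /* The bo must now hold the fill value outside of
                 * [start, start+len) and the pwritten value inside it. */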
                for (j = 0; j < start; j++) {
                        if (compare_tmp[j] != val) {
                                printf("amismatch at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                igt_fail(1);
                        }
                }
                for (; j < start + len; j++) {
                        if (compare_tmp[j] != tmp[0]) {
                                printf("bmismatch at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], tmp[0]);
                                igt_fail(1);
                        }
                }
                for (; j < BO_SIZE; j++) {
                        if (compare_tmp[j] != val) {
                                printf("cmismatch at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                igt_fail(1);
                        }
                }
                drm_intel_gem_bo_unmap_gtt(staging_bo);

                igt_progress("partial writes test: ", i, ROUNDS);
        }
}

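/* Interleave the two cases above: the partial pread touches CPU cachelines of
 * the bo, the blitter fill through the GTT then makes them stale, and the
 * subsequent partial pwrite must not rely on them. */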
static void test_partial_read_writes(void)
{
        int i, j;

        printf("checking partial writes after partial reads\n");
        for (i = 0; i < ROUNDS; i++) {
                int start, len;
                int val = i % 256;

                blt_bo_fill(staging_bo, scratch_bo, i);

                /* partial read */
                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
                for (j = 0; j < len; j++) {
                        if (tmp[j] != val) {
                                printf("mismatch in read at %i, got: %i, expected: %i\n",
                                       start + j, tmp[j], val);
                                igt_fail(1);
                        }
                }

                /* Change contents through gtt to make the pread cachelines
                 * stale. */
                val = (i + 17) % 256;
                blt_bo_fill(staging_bo, scratch_bo, val);

                /* partial write */
                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                memset(tmp, i + 63, BO_SIZE);

                drm_intel_bo_subdata(scratch_bo, start, len, tmp);

                copy_bo(scratch_bo, 1, tiled_staging_bo, 1);
                drm_intel_bo_get_subdata(tiled_staging_bo, 0, BO_SIZE,
                                         compare_tmp);

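                /* Same three-region check as in test_partial_writes(). */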
                for (j = 0; j < start; j++) {
                        if (compare_tmp[j] != val) {
                                printf("mismatch at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                igt_fail(1);
                        }
                }
                for (; j < start + len; j++) {
                        if (compare_tmp[j] != tmp[0]) {
                                printf("mismatch at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], tmp[0]);
                                igt_fail(1);
                        }
                }
                for (; j < BO_SIZE; j++) {
                        if (compare_tmp[j] != val) {
                                printf("mismatch at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                igt_fail(1);
                        }
                }
                drm_intel_gem_bo_unmap_gtt(staging_bo);

                igt_progress("partial read/writes test: ", i, ROUNDS);
        }
}

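/* Set up the subtest machinery, allocate the X-tiled scratch bo plus the
 * linear and tiled staging bos, and register the three subtests. */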
int main(int argc, char **argv)
{
        uint32_t tiling_mode = I915_TILING_X;

        igt_subtest_init(argc, argv);
        igt_skip_on_simulation();

        srandom(0xdeadbeef);

        fd = drm_open_any();

        bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
        //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
        devid = intel_get_drm_devid(fd);
        batch = intel_batchbuffer_alloc(bufmgr, devid);

        /* Overallocate the buffers we're actually using. */
        scratch_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
                                              BO_SIZE/4096, 4,
                                              &tiling_mode, &scratch_pitch, 0);
        igt_assert(tiling_mode == I915_TILING_X);
        igt_assert(scratch_pitch == 4096);
        staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
        tiled_staging_bo = drm_intel_bo_alloc_tiled(bufmgr, "tiled staging bo",
                                                    1024, BO_SIZE/4096, 4,
                                                    &tiling_mode,
                                                    &scratch_pitch, 0);

        igt_init_aperture_trashers(bufmgr);
        mappable_gtt_limit = gem_mappable_aperture_size();

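        /* Each igt_subtest block only runs when that subtest is selected;
         * igt_exit() below turns the collected subtest results into the
         * test's exit code. */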
        igt_subtest("reads")
                test_partial_reads();

        igt_subtest("writes")
                test_partial_writes();

        igt_subtest("writes-after-reads")
                test_partial_read_writes();

        igt_cleanup_aperture_trashers();
        drm_intel_bufmgr_destroy(bufmgr);

        close(fd);

        igt_exit();
}