tests/gem_tiled_partial_pwrite_pread: convert to subtest infrastructure
tests/gem_tiled_partial_pwrite_pread.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_gpu_tools.h"

/*
 * Testcase: pwrite/pread consistency when touching partial cachelines
 *
 * Some fancy new pwrite/pread optimizations clflush in-line while
 * reading/writing. Check whether all required clflushes happen.
 *
 * Unfortunately really old mesa used unaligned pread/pwrite for s/w fallback
 * rendering, so we need to check whether this works on tiled buffers, too.
 */

static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;

drm_intel_bo *scratch_bo;
drm_intel_bo *staging_bo;
drm_intel_bo *tiled_staging_bo;
unsigned long scratch_pitch;
#define BO_SIZE (32*4096)
uint32_t devid;
uint64_t mappable_gtt_limit;
int fd;

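/* Blit the whole BO_SIZE payload from @src to @dst with XY_SRC_COPY_BLT,
 * setting the SRC/DST tiled bits according to @src_tiled/@dst_tiled (on gen4+
 * the blitter expects tiled pitches in dwords, hence the division by 4). */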
static void
copy_bo(drm_intel_bo *src, int src_tiled,
        drm_intel_bo *dst, int dst_tiled)
{
        unsigned long dst_pitch = scratch_pitch;
        unsigned long src_pitch = scratch_pitch;
        uint32_t cmd_bits = 0;

        /* dst is tiled ... */
        if (intel_gen(devid) >= 4 && dst_tiled) {
                dst_pitch /= 4;
                cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
        }

        if (intel_gen(devid) >= 4 && src_tiled) {
                src_pitch /= 4;
                cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
        }

        BEGIN_BATCH(8);
        OUT_BATCH(XY_SRC_COPY_BLT_CMD |
                  XY_SRC_COPY_BLT_WRITE_ALPHA |
                  XY_SRC_COPY_BLT_WRITE_RGB |
                  cmd_bits);
        OUT_BATCH((3 << 24) | /* 32 bits */
                  (0xcc << 16) | /* copy ROP */
                  dst_pitch);
        OUT_BATCH(0 << 16 | 0);
        OUT_BATCH(BO_SIZE/scratch_pitch << 16 | 1024);
        OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
        OUT_BATCH(0 << 16 | 0);
        OUT_BATCH(src_pitch);
        OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
        ADVANCE_BATCH();

        intel_batchbuffer_flush(batch);
}

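/* Fill @bo with the byte @val: write the pattern into the linear @tmp_bo
 * through a GTT mapping, then blit it into the (tiled) target @bo. */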
static void
blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, int val)
{
        uint8_t *gtt_ptr;
        int i;

        drm_intel_gem_bo_map_gtt(tmp_bo);
        gtt_ptr = tmp_bo->virtual;

        for (i = 0; i < BO_SIZE; i++)
                gtt_ptr[i] = val;

        drm_intel_gem_bo_unmap_gtt(tmp_bo);

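        /* If the target bo currently sits in the mappable part of the GTT,
         * trash the aperture first, presumably so the bo has to be rebound
         * before the blit below (and the later pread/pwrite) touch it. */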
        if (bo->offset < mappable_gtt_limit &&
            (IS_G33(devid) || intel_gen(devid) >= 4))
                drmtest_trash_aperture();

        copy_bo(tmp_bo, 0, bo, 1);
}

#define MAX_BLT_SIZE 128
#define ROUNDS 200
uint8_t tmp[BO_SIZE];
uint8_t compare_tmp[BO_SIZE];

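/* Fill the scratch bo through the blitter, then pread a random sub-range and
 * check that every byte matches the fill value. */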
static void test_partial_reads(void)
{
        int i, j;

        printf("checking partial reads\n");
        for (i = 0; i < ROUNDS; i++) {
                int start, len;
                int val = i % 256;

                blt_bo_fill(staging_bo, scratch_bo, i);

                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
                for (j = 0; j < len; j++) {
                        if (tmp[j] != val) {
                                printf("mismatch at %i, got: %i, expected: %i\n",
                                       start + j, tmp[j], val);
                                exit(1);
                        }
                }

                drmtest_progress("partial reads test: ", i, ROUNDS);
        }
}

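/* Fill the scratch bo, pwrite a different pattern into a random sub-range,
 * then blit the result into the tiled staging bo and read it all back: bytes
 * outside the written range must keep the fill value, bytes inside must hold
 * the new pattern. */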
static void test_partial_writes(void)
{
        int i, j;

        printf("checking partial writes\n");
        for (i = 0; i < ROUNDS; i++) {
                int start, len;
                int val = i % 256;

                blt_bo_fill(staging_bo, scratch_bo, i);

                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                memset(tmp, i + 63, BO_SIZE);

                drm_intel_bo_subdata(scratch_bo, start, len, tmp);

                copy_bo(scratch_bo, 1, tiled_staging_bo, 1);
                drm_intel_bo_get_subdata(tiled_staging_bo, 0, BO_SIZE,
                                         compare_tmp);

                for (j = 0; j < start; j++) {
                        if (compare_tmp[j] != val) {
                                printf("mismatch before write at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                exit(1);
                        }
                }
                for (; j < start + len; j++) {
                        if (compare_tmp[j] != tmp[0]) {
                                printf("mismatch in written range at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], tmp[0]);
                                exit(1);
                        }
                }
                for (; j < BO_SIZE; j++) {
                        if (compare_tmp[j] != val) {
                                printf("mismatch after write at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                exit(1);
                        }
                }
                drm_intel_gem_bo_unmap_gtt(staging_bo);

                drmtest_progress("partial writes test: ", i, ROUNDS);
        }
}

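/* Interleave a partial pread with a partial pwrite: read a random range,
 * refill the bo through the blitter so any CPU cachelines left by the pread
 * go stale, then pwrite a new pattern and verify the whole bo as in the
 * writes test. */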
static void test_partial_read_writes(void)
{
        int i, j;

        printf("checking partial writes after partial reads\n");
        for (i = 0; i < ROUNDS; i++) {
                int start, len;
                int val = i % 256;

                blt_bo_fill(staging_bo, scratch_bo, i);

                /* partial read */
                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
                for (j = 0; j < len; j++) {
                        if (tmp[j] != val) {
                                printf("mismatch in read at %i, got: %i, expected: %i\n",
                                       start + j, tmp[j], val);
                                exit(1);
                        }
                }

                /* Change contents through gtt to make the pread cachelines
                 * stale. */
                val = (i + 17) % 256;
                blt_bo_fill(staging_bo, scratch_bo, val);

                /* partial write */
                start = random() % BO_SIZE;
                len = random() % (BO_SIZE-start) + 1;

                memset(tmp, i + 63, BO_SIZE);

                drm_intel_bo_subdata(scratch_bo, start, len, tmp);

                copy_bo(scratch_bo, 1, tiled_staging_bo, 1);
                drm_intel_bo_get_subdata(tiled_staging_bo, 0, BO_SIZE,
                                         compare_tmp);

                for (j = 0; j < start; j++) {
                        if (compare_tmp[j] != val) {
                                printf("mismatch before write at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                exit(1);
                        }
                }
                for (; j < start + len; j++) {
                        if (compare_tmp[j] != tmp[0]) {
                                printf("mismatch in written range at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], tmp[0]);
                                exit(1);
                        }
                }
                for (; j < BO_SIZE; j++) {
                        if (compare_tmp[j] != val) {
                                printf("mismatch after write at %i, got: %i, expected: %i\n",
                                       j, compare_tmp[j], val);
                                exit(1);
                        }
                }
                drm_intel_gem_bo_unmap_gtt(staging_bo);

                drmtest_progress("partial read/writes test: ", i, ROUNDS);
        }
}

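/* Set up bufmgr, batchbuffer and the scratch/staging bos, then run the three
 * checks as individual subtests so they can be selected from the command
 * line. */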
int main(int argc, char **argv)
{
        uint32_t tiling_mode = I915_TILING_X;

        drmtest_subtest_init(argc, argv);

        srandom(0xdeadbeef);

        fd = drm_open_any();

        bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
        //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
        devid = intel_get_drm_devid(fd);
        batch = intel_batchbuffer_alloc(bufmgr, devid);

        /* overallocate the buffers we're actually using */
        scratch_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
                                              BO_SIZE/4096, 4,
                                              &tiling_mode, &scratch_pitch, 0);
        assert(tiling_mode == I915_TILING_X);
        assert(scratch_pitch == 4096);
        staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
        tiled_staging_bo = drm_intel_bo_alloc_tiled(bufmgr, "tiled staging bo",
                                                    1024, BO_SIZE/4096, 4,
                                                    &tiling_mode,
                                                    &scratch_pitch, 0);

        drmtest_init_aperture_trashers(bufmgr);
        mappable_gtt_limit = gem_mappable_aperture_size();

        if (drmtest_run_subtest("reads"))
                test_partial_reads();

        if (drmtest_run_subtest("writes"))
                test_partial_writes();

        if (drmtest_run_subtest("writes-after-reads"))
                test_partial_read_writes();

        drmtest_cleanup_aperture_trashers();
        drm_intel_bufmgr_destroy(bufmgr);

        close(fd);

        return 0;
}