2 * Copyright © 2013 Google
3 * Copyright © 2013 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * Kees Cook <keescook@chromium.org>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 * Rafael Barbalho <rafael.barbalho@intel.com>
41 #include <sys/ioctl.h>
44 #include <sys/types.h>
46 #include "ioctl_wrappers.h"
47 #include "intel_chipset.h"
52 * Testcase: Kernel relocation overflows are caught.
/* Shared test state used by all subtests below.
 * NOTE(review): this extraction embeds original line numbers at the start of
 * each line and has interior lines elided; tokens are preserved as-is. */
/* Array of execbuffer objects handed to DRM_IOCTL_I915_GEM_EXECBUFFER2. */
58 struct drm_i915_gem_exec_object2 *execobjs;
/* The execbuffer request itself; zero-initialized, fields set per subtest. */
59 struct drm_i915_gem_execbuffer2 execbuf = { 0 };
/* Base of the giant mmap'd relocation-entry area (sized in igt_main). */
60 struct drm_i915_gem_relocation_entry *reloc;
/* GEM handle of the MI_NOOP + MI_BATCH_BUFFER_END batch every test runs. */
63 uint32_t batch_handle;
/*
 * Exercise kernel validation of the relocation *source* offset — the offset
 * inside the target object where the relocated address gets written.
 *
 * @devid: PCI device id; gates the gen8+ subtests (8-byte relocs) via
 *         intel_gen(devid).
 * @reloc_gtt: selects which reloc write path the subtest names advertise
 *             ("reloc-gtt" vs "reloc-cpu").
 *
 * NOTE(review): interior lines are elided in this extraction (the if/else
 * bodies selecting relocation_type and the subtest closing braces are not
 * visible) — confirm control flow against the full file.
 */
65 static void source_offset_tests(int devid, bool reloc_gtt)
67 struct drm_i915_gem_relocation_entry single_reloc;
69 const char *relocation_type;
72 relocation_type = "reloc-gtt";
74 relocation_type = "reloc-cpu";
/* Target object: two pages, so relocs can straddle the page boundary. */
77 handle = gem_create(fd, 8192);
/* Slot 1: the batch, with no relocations of its own. */
79 execobjs[1].handle = batch_handle;
80 execobjs[1].relocation_count = 0;
81 execobjs[1].relocs_ptr = 0;
/* Slot 0: the target object carrying exactly one reloc, single_reloc. */
83 execobjs[0].handle = handle;
84 execobjs[0].relocation_count = 1;
85 execobjs[0].relocs_ptr = (uintptr_t) &single_reloc;
86 execbuf.buffer_count = 2;
/* Touch the object through a GTT mapping and clear it, which also forces
 * it into the GTT domain before the reloc subtests run. */
89 dst_gtt = gem_mmap(fd, handle, 8192, PROT_READ | PROT_WRITE);
90 igt_assert(dst_gtt != MAP_FAILED);
91 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
92 memset(dst_gtt, 0, 8192);
93 munmap(dst_gtt, 8192);
94 relocation_type = "reloc-gtt";
96 relocation_type = "reloc-cpu";
100 /* Special tests for 64b relocs. */
/* An 8-byte reloc starting 4 bytes before the page boundary straddles two
 * pages; both delta values must still succeed on gen8+. */
101 igt_subtest_f("source-offset-page-stradle-gen8-%s", relocation_type) {
102 igt_require(intel_gen(devid) >= 8);
103 single_reloc.offset = 4096 - 4;
104 single_reloc.delta = 0;
105 single_reloc.target_handle = handle;
106 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
107 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
108 single_reloc.presumed_offset = 0;
110 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) == 0);
111 single_reloc.delta = 1024;
112 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) == 0);
/* Last valid position for an 8-byte reloc: exactly 8 bytes before the end
 * of the 8192-byte object. Must be accepted. */
115 igt_subtest_f("source-offset-end-gen8-%s", relocation_type) {
116 igt_require(intel_gen(devid) >= 8);
117 single_reloc.offset = 8192 - 8;
118 single_reloc.delta = 0;
119 single_reloc.target_handle = handle;
120 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
121 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
122 single_reloc.presumed_offset = 0;
124 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) == 0);
/* 8192 - 4 leaves only 4 bytes for an 8-byte gen8 reloc — the write would
 * run past the object, so the kernel must reject it with EINVAL. */
127 igt_subtest_f("source-offset-overflow-gen8-%s", relocation_type) {
128 igt_require(intel_gen(devid) >= 8);
129 single_reloc.offset = 8192 - 4;
130 single_reloc.delta = 0;
131 single_reloc.target_handle = handle;
132 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
133 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
134 single_reloc.presumed_offset = 0;
136 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) != 0);
137 igt_assert(errno == EINVAL);
140 /* Tests for old 4byte relocs on pre-gen8. */
/* 8192 - 4 is the last valid position for a 4-byte pre-gen8 reloc. */
141 igt_subtest_f("source-offset-end-%s", relocation_type) {
142 igt_require(intel_gen(devid) < 8);
143 single_reloc.offset = 8192 - 4;
144 single_reloc.delta = 0;
145 single_reloc.target_handle = handle;
146 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
147 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
148 single_reloc.presumed_offset = 0;
150 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) == 0);
/* Offset exactly at the object's size is one past the end → EINVAL. */
153 igt_subtest_f("source-offset-big-%s", relocation_type) {
154 single_reloc.offset = 8192;
155 single_reloc.delta = 0;
156 single_reloc.target_handle = handle;
157 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
158 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
159 single_reloc.presumed_offset = 0;
161 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) != 0);
162 igt_assert(errno == EINVAL);
/* A negative offset (cast through int64_t into the u64 field) must not be
 * interpreted as a huge valid offset → EINVAL. */
165 igt_subtest_f("source-offset-negative-%s", relocation_type) {
166 single_reloc.offset = (int64_t) -4;
167 single_reloc.delta = 0;
168 single_reloc.target_handle = handle;
169 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
170 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
171 single_reloc.presumed_offset = 0;
173 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) != 0);
174 igt_assert(errno == EINVAL);
/* Reloc offsets must be naturally aligned; offset 1 → EINVAL. */
177 igt_subtest_f("source-offset-unaligned-%s", relocation_type) {
178 single_reloc.offset = 1;
179 single_reloc.delta = 0;
180 single_reloc.target_handle = handle;
181 single_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
182 single_reloc.write_domain = I915_GEM_DOMAIN_RENDER;
183 single_reloc.presumed_offset = 0;
185 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) != 0);
186 igt_assert(errno == EINVAL);
/* Drop the per-run target object; batch_handle is owned by igt_main. */
190 gem_close(fd, handle);
/*
 * Subtests attacking the relocation-array handling itself: bogus user
 * pointers, overflowing relocation counts, and unaligned batch offsets.
 *
 * NOTE(review): interior lines are elided in this extraction (subtest
 * closing braces and some setup lines are not visible) — confirm against
 * the full file.
 */
194 static void reloc_tests(void)
/* Running 32-bit sum of relocation counts, used to construct a wrap. */
197 unsigned int total_unsigned = 0;
/* relocs_ptr = 0 with a nonzero count → copy_from_user must fail. */
199 igt_subtest("invalid-address") {
200 /* Attempt unmapped single entry. */
201 execobjs[0].relocation_count = 1;
202 execobjs[0].relocs_ptr = 0;
203 execbuf.buffer_count = 1;
206 ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
207 igt_assert(errno == EFAULT);
/* relocation_count with the sign bit set (1 << 31) must be rejected, not
 * multiplied into an allocation size. */
210 igt_subtest("single-overflow") {
211 /* Attempt single overflowed entry. */
212 execobjs[0].relocation_count = (1 << 31);
213 execobjs[0].relocs_ptr = (uintptr_t)reloc;
214 execbuf.buffer_count = 1;
217 ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
218 igt_assert(errno == EINVAL);
/* Re-establish a minimal known-good execbuf: just the batch, no relocs. */
222 execobjs[0].handle = batch_handle;
223 execobjs[0].relocation_count = 0;
224 execobjs[0].relocs_ptr = 0;
226 execbuf.buffer_count = 1;
228 /* Make sure the batch would succeed except for the thing we're
230 execbuf.batch_start_offset = 0;
231 execbuf.batch_len = 8;
232 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) == 0);
/* Odd batch_start_offset is unaligned → EINVAL. */
235 igt_subtest("batch-start-unaligned") {
236 execbuf.batch_start_offset = 1;
237 execbuf.batch_len = 8;
239 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) != 0);
240 igt_assert(errno == EINVAL);
/* Odd batch_len is unaligned → EINVAL. */
243 igt_subtest("batch-end-unaligned") {
244 execbuf.batch_start_offset = 0;
245 execbuf.batch_len = 7;
247 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) != 0);
248 igt_assert(errno == EINVAL);
252 /* Undo damage for next tests. */
253 execbuf.batch_start_offset = 0;
254 execbuf.batch_len = 8;
/* Spread relocation counts across num objects so the unsigned 32-bit sum
 * wraps around; the kernel must detect the wrap instead of under-allocating. */
257 igt_subtest("wrapped-overflow") {
258 /* Attempt wrapped overflow entries. */
259 for (i = 0; i < num; i++) {
260 struct drm_i915_gem_exec_object2 *obj = &execobjs[i];
261 obj->handle = handles[i];
264 /* Wraps to 1 on last count. */
265 obj->relocation_count = 1 - total_unsigned;
266 obj->relocs_ptr = (uintptr_t)reloc;
268 obj->relocation_count = entries;
269 obj->relocs_ptr = (uintptr_t)reloc;
272 total_unsigned += obj->relocation_count;
274 execbuf.buffer_count = num;
277 ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
278 igt_assert(errno == EINVAL);
/*
 * Attack execbuf.buffer_count: huge counts whose size computations would
 * overflow the kernel's pointer-table or exec-object-array allocations.
 *
 * NOTE(review): interior lines are elided in this extraction (subtest
 * closing braces are not visible) — confirm against the full file.
 */
282 static void buffer_count_tests(void)
284 igt_subtest("buffercount-overflow") {
/* Fill all num slots with real handles and no relocations. */
285 for (int i = 0; i < num; i++) {
286 execobjs[i].relocation_count = 0;
287 execobjs[i].relocs_ptr = 0;
288 execobjs[i].handle = handles[i];
291 execobjs[0].relocation_count = 0;
292 execobjs[0].relocs_ptr = 0;
293 /* We only have num buffers actually, but the overflow will make
294 * sure we blow up the kernel before we blow up userspace. */
295 execbuf.buffer_count = num;
297 /* Put a real batch at the end. */
298 execobjs[num - 1].handle = batch_handle;
300 /* Make sure the basic thing would work first ... */
302 ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
303 igt_assert(errno == 0);
305 /* ... then be evil: Overflow of the pointer table (which has a
306 * bit of lead datastructures, so no + 1 needed to overflow). */
307 execbuf.buffer_count = INT_MAX / sizeof(void *);
310 ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
311 igt_assert(errno == EINVAL);
313 /* ... then be evil: Copying/allocating the array. */
314 execbuf.buffer_count = UINT_MAX / sizeof(execobjs[0]) + 1;
317 ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
318 igt_assert(errno == EINVAL);
/* Interior of igt_main (the opener is not visible in this extraction):
 * one-time setup, the three subtest groups, then teardown.
 * NOTE(review): interior lines are elided — confirm against the full file. */
/* Minimal valid batch: a no-op followed by the end-of-batch command. */
328 uint32_t batch_data [2] = { MI_NOOP, MI_BATCH_BUFFER_END };
332 devid = intel_get_drm_devid(fd);
334 /* Create giant reloc buffer area. */
/* entries per object is chosen so (num - 1) objects' counts sum to 2^32,
 * the wrap the "wrapped-overflow" subtest relies on. */
336 entries = ((1ULL << 32) / (num - 1));
337 reloc_size = entries * sizeof(struct drm_i915_gem_relocation_entry);
338 reloc = mmap(NULL, reloc_size, PROT_READ | PROT_WRITE,
339 MAP_PRIVATE | MAP_ANON, -1, 0);
340 igt_assert(reloc != MAP_FAILED);
342 /* Allocate the handles we'll need to wrap. */
343 handles = calloc(num, sizeof(*handles));
344 for (int i = 0; i < num; i++)
345 handles[i] = gem_create(fd, 4096);
/* gen6+ uses the BLT ring; the pre-gen6 branch is elided here. */
347 if (intel_gen(devid) >= 6)
348 ring = I915_EXEC_BLT;
352 /* Create relocation objects. */
353 execobjs = calloc(num, sizeof(*execobjs));
354 execbuf.buffers_ptr = (uintptr_t)execobjs;
355 execbuf.batch_start_offset = 0;
356 execbuf.batch_len = 8;
357 execbuf.cliprects_ptr = 0;
358 execbuf.num_cliprects = 0;
361 execbuf.flags = ring;
362 i915_execbuffer2_set_context_id(execbuf, 0);
/* The shared batch object all subtests execute. */
365 batch_handle = gem_create(fd, 4096);
367 gem_write(fd, batch_handle, 0, batch_data, sizeof(batch_data));
/* Run source-offset tests for both reloc write paths. */
372 source_offset_tests(devid, false);
373 source_offset_tests(devid, true);
375 buffer_count_tests();
378 gem_close(fd, batch_handle);