/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <time.h>
#include <sys/ioctl.h>

#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "igt_debugfs.h"
#include "intel_chipset.h"

#define RS_NO_ERROR      0
#define RS_BATCH_ACTIVE  (1 << 0)
#define RS_BATCH_PENDING (1 << 1)
#define RS_UNKNOWN       (1 << 2)

static uint32_t devid;
static bool hw_contexts;

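/*
 * Local copies of the reset-stats and context ioctl interfaces, so the
 * test still builds against kernel/libdrm headers that predate them.
 * The field layout below follows the upstream i915 uapi definitions.
 */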
struct local_drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;
	__u32 reset_count;
	__u32 batch_active;
	__u32 batch_pending;
	__u32 pad;
};

struct local_drm_i915_gem_context_create {
	__u32 ctx_id;
	__u32 pad;
};

struct local_drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

#define CONTEXT_CREATE_IOCTL DRM_IOWR(DRM_COMMAND_BASE + 0x2d, struct local_drm_i915_gem_context_create)
#define CONTEXT_DESTROY_IOCTL DRM_IOWR(DRM_COMMAND_BASE + 0x2e, struct local_drm_i915_gem_context_destroy)
#define GET_RESET_STATS_IOCTL DRM_IOWR(DRM_COMMAND_BASE + 0x32, struct local_drm_i915_reset_stats)

#define LOCAL_I915_EXEC_VEBOX (4 << 0)

static bool gem_has_render(int fd)
{
	/* The render ring is present on all generations. */
	return true;
}

static bool has_context(const struct target_ring *ring);

static const struct target_ring {
	uint32_t exec;
	bool (*present)(int fd);
	bool (*contexts)(const struct target_ring *ring);
	const char *name;
} rings[] = {
	{ I915_EXEC_RENDER, gem_has_render, has_context, "render" },
	{ I915_EXEC_BLT, gem_has_blt, has_context, "blt" },
	{ I915_EXEC_BSD, gem_has_bsd, has_context, "bsd" },
	{ LOCAL_I915_EXEC_VEBOX, gem_has_vebox, has_context, "vebox" },
};

static bool has_context(const struct target_ring *ring)
{
	if (!hw_contexts)
		return false;

	/* Hardware contexts are only exposed on the render ring. */
	if (ring->exec == I915_EXEC_RENDER)
		return true;

	return false;
}

#define NUM_RINGS (sizeof(rings)/sizeof(struct target_ring))

static const struct target_ring *current_ring;

static uint32_t context_create(int fd)
{
	struct local_drm_i915_gem_context_create create;
	int ret;

	/* Garbage in, to check that the kernel fills in the id. */
	create.ctx_id = rand();
	create.pad = rand();

	ret = drmIoctl(fd, CONTEXT_CREATE_IOCTL, &create);
	igt_assert(ret == 0);

	return create.ctx_id;
}

static int context_destroy(int fd, uint32_t ctx_id)
{
	int ret;
	struct local_drm_i915_gem_context_destroy destroy;

	destroy.ctx_id = ctx_id;
	destroy.pad = rand();

	ret = drmIoctl(fd, CONTEXT_DESTROY_IOCTL, &destroy);
	if (ret != 0)
		return -errno;

	return 0;
}

static int gem_reset_stats(int fd, int ctx_id,
			   struct local_drm_i915_reset_stats *rs)
{
	int ret;

	rs->ctx_id = ctx_id;
	rs->flags = 0;
	/* Garbage in, to check that the kernel fills in the counts. */
	rs->reset_count = rand();
	rs->batch_active = rand();
	rs->batch_pending = rand();
	rs->pad = 0;

	do {
		ret = ioctl(fd, GET_RESET_STATS_IOCTL, rs);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	if (ret < 0)
		return -errno;

	return 0;
}

static int gem_reset_status(int fd, int ctx_id)
{
	int ret;
	struct local_drm_i915_reset_stats rs;

	ret = gem_reset_stats(fd, ctx_id, &rs);
	if (ret)
		return ret;

	if (rs.batch_active)
		return RS_BATCH_ACTIVE;
	if (rs.batch_pending)
		return RS_BATCH_PENDING;

	return RS_NO_ERROR;
}

static int gem_exec(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	int ret;

	ret = ioctl(fd,
		    DRM_IOCTL_I915_GEM_EXECBUFFER2,
		    execbuf);
	if (ret < 0)
		return -errno;

	return 0;
}

static int exec_valid_ring(int fd, int ctx, int ring)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	int ret;

	uint32_t buf[2] = { MI_BATCH_BUFFER_END, 0 };

	exec.handle = gem_create(fd, 4096);
	gem_write(fd, exec.handle, 0, buf, sizeof(buf));
	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = sizeof(buf);
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, ctx);
	execbuf.rsvd2 = 0;

	ret = gem_exec(fd, &execbuf);
	if (ret < 0)
		return ret;

	return exec.handle;
}

static int exec_valid(int fd, int ctx)
{
	return exec_valid_ring(fd, ctx, current_ring->exec);
}

#define BUFSIZE (4 * 1024)
#define ITEMS   (BUFSIZE >> 2)

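/*
 * Inject a GPU hang: submit a batch once to learn its GTT offset, then
 * rewrite it so that an MI_BATCH_BUFFER_START at a random offset jumps
 * back to itself, i.e. the batch spins forever.  Finally arm the
 * stop-rings debugfs interface (with ban/error flags as requested) so
 * that hangcheck will declare the ring hung and reset the GPU.
 * Returns the handle of the hanging batch buffer.
 */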
static int inject_hang_ring(int fd, int ctx, int ring, bool ignore_ban_error)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	uint64_t gtt_off;
	uint32_t *buf;
	int roff, i;
	unsigned cmd_len = 2;
	enum stop_ring_flags flags;

	srandom(time(NULL));

	/* gen8+ uses 48-bit addressing: MI_BATCH_BUFFER_START is 3 dwords */
	if (intel_gen(devid) >= 8)
		cmd_len = 3;

	buf = malloc(BUFSIZE);
	igt_assert(buf != NULL);

	buf[0] = MI_BATCH_BUFFER_END;
	buf[1] = MI_NOOP;

	exec.handle = gem_create(fd, BUFSIZE);
	gem_write(fd, exec.handle, 0, buf, BUFSIZE);
	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = BUFSIZE;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, ctx);
	execbuf.rsvd2 = 0;

	/* First submission only discovers the buffer's GTT offset. */
	igt_assert(gem_exec(fd, &execbuf) == 0);

	gtt_off = exec.offset;

	for (i = 0; i < ITEMS; i++)
		buf[i] = MI_NOOP;

	/* Make the batch jump to itself, so it spins forever. */
	roff = random() % (ITEMS - cmd_len);
	buf[roff] = MI_BATCH_BUFFER_START | (cmd_len - 2);
	buf[roff + 1] = (gtt_off & 0xfffffffc) + (roff << 2);
	if (cmd_len == 3)
		buf[roff + 2] = (gtt_off & 0xffffffff00000000ull) >> 32;
	buf[roff + cmd_len] = MI_BATCH_BUFFER_END;

	printf("loop injected at 0x%lx (off 0x%x, bo_start 0x%lx, bo_end 0x%lx)\n",
	       (long unsigned int)((roff << 2) + gtt_off),
	       roff << 2, (long unsigned int)gtt_off,
	       (long unsigned int)(gtt_off + BUFSIZE - 1));

	gem_write(fd, exec.handle, 0, buf, BUFSIZE);

	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = BUFSIZE;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, ctx);
	execbuf.rsvd2 = 0;

	igt_assert(gem_exec(fd, &execbuf) == 0);

	/* The object must not have moved, or the loop target is stale. */
	igt_assert(gtt_off == exec.offset);

	free(buf);

	flags = igt_to_stop_ring_flag(ring);

	flags |= STOP_RING_ALLOW_BAN;

	if (!ignore_ban_error)
		flags |= STOP_RING_ALLOW_ERRORS;

	igt_set_stop_rings(flags);

	return exec.handle;
}

static int inject_hang(int fd, int ctx)
{
	return inject_hang_ring(fd, ctx, current_ring->exec, false);
}

static int inject_hang_no_ban_error(int fd, int ctx)
{
	return inject_hang_ring(fd, ctx, current_ring->exec, true);
}

static int _assert_reset_status(int fd, int ctx, int status)
{
	int rs;

	rs = gem_reset_status(fd, ctx);
	if (rs < 0) {
		printf("reset status for %d ctx %d returned %d\n",
		       fd, ctx, rs);
		return rs;
	}

	if (rs != status) {
		printf("%d:%d reset status %d differs from assumed %d\n",
		       fd, ctx, rs, status);
		return 1;
	}

	return 0;
}

#define assert_reset_status(fd, ctx, status) \
	igt_assert(_assert_reset_status(fd, ctx, status) == 0)

#define MAX_FD 32
#define MAX_CTX 100

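/*
 * Open num_fds drm fds and submit one batch on each; the fd at
 * hang_index (or none, if negative) gets a hanging batch.  After the
 * reset, fds that submitted before the hang must read back as clean,
 * the guilty fd as batch_active, and later fds as batch_pending.
 */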
static void test_rs(int num_fds, int hang_index, int rs_assumed_no_hang)
{
	int i;
	int fd[MAX_FD];
	int h[MAX_FD];

	igt_assert(num_fds <= MAX_FD);
	igt_assert(hang_index < MAX_FD);

	for (i = 0; i < num_fds; i++) {
		fd[i] = drm_open_any();
		igt_assert(fd[i]);
	}

	for (i = 0; i < num_fds; i++)
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

	for (i = 0; i < num_fds; i++) {
		if (i == hang_index)
			h[i] = inject_hang(fd[i], 0);
		else
			h[i] = exec_valid(fd[i], 0);
	}

	gem_sync(fd[num_fds - 1], h[num_fds - 1]);

	for (i = 0; i < num_fds; i++) {
		if (hang_index < 0) {
			assert_reset_status(fd[i], 0, rs_assumed_no_hang);
			continue;
		}

		if (i < hang_index)
			assert_reset_status(fd[i], 0, RS_NO_ERROR);
		if (i == hang_index)
			assert_reset_status(fd[i], 0, RS_BATCH_ACTIVE);
		if (i > hang_index)
			assert_reset_status(fd[i], 0, RS_BATCH_PENDING);
	}

	for (i = 0; i < num_fds; i++) {
		gem_close(fd[i], h[i]);
		close(fd[i]);
	}
}

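/*
 * Same idea as test_rs(), but with num_ctx contexts on each fd; only
 * the context at (hang_index, hang_context) hangs.  Contexts submitted
 * before the guilty one stay clean, the guilty one reads batch_active,
 * and everything submitted after it reads batch_pending.
 */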
static void test_rs_ctx(int num_fds, int num_ctx, int hang_index,
			int hang_context)
{
	int i, j;
	int fd[MAX_FD];
	int h[MAX_FD][MAX_CTX];
	int ctx[MAX_FD][MAX_CTX];

	igt_assert(num_fds <= MAX_FD);
	igt_assert(hang_index < MAX_FD);

	igt_assert(num_ctx <= MAX_CTX);
	igt_assert(hang_context < MAX_CTX);

	test_rs(num_fds, -1, RS_NO_ERROR);

	for (i = 0; i < num_fds; i++) {
		fd[i] = drm_open_any();
		igt_assert(fd[i]);
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

		for (j = 0; j < num_ctx; j++) {
			ctx[i][j] = context_create(fd[i]);
		}

		assert_reset_status(fd[i], 0, RS_NO_ERROR);
	}

	for (i = 0; i < num_fds; i++) {
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

		for (j = 0; j < num_ctx; j++)
			assert_reset_status(fd[i], ctx[i][j], RS_NO_ERROR);

		assert_reset_status(fd[i], 0, RS_NO_ERROR);
	}

	for (i = 0; i < num_fds; i++) {
		for (j = 0; j < num_ctx; j++) {
			if (i == hang_index && j == hang_context)
				h[i][j] = inject_hang(fd[i], ctx[i][j]);
			else
				h[i][j] = exec_valid(fd[i], ctx[i][j]);
		}
	}

	/* Sync on the last batch handle, not the context id. */
	gem_sync(fd[num_fds - 1], h[num_fds - 1][num_ctx - 1]);

	for (i = 0; i < num_fds; i++)
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

	for (i = 0; i < num_fds; i++) {
		for (j = 0; j < num_ctx; j++) {
			if (i < hang_index)
				assert_reset_status(fd[i], ctx[i][j], RS_NO_ERROR);
			if (i == hang_index && j < hang_context)
				assert_reset_status(fd[i], ctx[i][j], RS_NO_ERROR);
			if (i == hang_index && j == hang_context)
				assert_reset_status(fd[i], ctx[i][j],
						    RS_BATCH_ACTIVE);
			if (i == hang_index && j > hang_context)
				assert_reset_status(fd[i], ctx[i][j],
						    RS_BATCH_PENDING);
			if (i > hang_index)
				assert_reset_status(fd[i], ctx[i][j],
						    RS_BATCH_PENDING);
		}
	}

	for (i = 0; i < num_fds; i++) {
		for (j = 0; j < num_ctx; j++) {
			gem_close(fd[i], h[i][j]);
			igt_assert(context_destroy(fd[i], ctx[i][j]) == 0);
		}

		assert_reset_status(fd[i], 0, RS_NO_ERROR);

		close(fd[i]);
	}
}

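/*
 * A client that repeatedly hangs the GPU gets banned: further execbuf
 * calls on the banned fd fail with -EIO, while an innocent fd on the
 * same device keeps working and only sees its batches as pending.
 */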
static void test_ban(void)
{
	int h1,h2,h3,h4,h5,h6,h7;
	int fd_bad, fd_good;
	int retry = 10;
	int active_count = 0, pending_count = 0;
	struct local_drm_i915_reset_stats rs_bad, rs_good;

	fd_bad = drm_open_any();
	igt_assert(fd_bad >= 0);

	fd_good = drm_open_any();
	igt_assert(fd_good >= 0);

	assert_reset_status(fd_bad, 0, RS_NO_ERROR);
	assert_reset_status(fd_good, 0, RS_NO_ERROR);

	h1 = exec_valid(fd_bad, 0);
	igt_assert(h1 >= 0);
	h5 = exec_valid(fd_good, 0);
	igt_assert(h5 >= 0);

	assert_reset_status(fd_bad, 0, RS_NO_ERROR);
	assert_reset_status(fd_good, 0, RS_NO_ERROR);

	h2 = inject_hang_no_ban_error(fd_bad, 0);
	igt_assert(h2 >= 0);
	active_count++;
	/* The second hang will be counted as pending for this one */
	pending_count++;

	h6 = exec_valid(fd_good, 0);
	h7 = exec_valid(fd_good, 0);

	while (retry--) {
		h3 = inject_hang_no_ban_error(fd_bad, 0);
		igt_assert(h3 >= 0);
		gem_sync(fd_bad, h3);
		active_count++;

		/* This second hang counts as active */
		assert_reset_status(fd_bad, 0, RS_BATCH_ACTIVE);

		h4 = exec_valid(fd_bad, 0);
		if (h4 == -EIO) {
			gem_close(fd_bad, h3);
			break;
		}

		/* This should not happen often, but sometimes the hang
		 * is detected too slowly because we fake it with a
		 * busy-looping batch. Retry in that case. */
		igt_assert(h4 >= 0);
		gem_close(fd_bad, h3);
		gem_close(fd_bad, h4);

		printf("retrying for ban (%d)\n", retry);
	}

	igt_assert(h4 == -EIO);
	assert_reset_status(fd_bad, 0, RS_BATCH_ACTIVE);

	gem_sync(fd_good, h7);
	assert_reset_status(fd_good, 0, RS_BATCH_PENDING);

	igt_assert(gem_reset_stats(fd_good, 0, &rs_good) == 0);
	igt_assert(gem_reset_stats(fd_bad, 0, &rs_bad) == 0);

	igt_assert(rs_bad.batch_active == active_count);
	igt_assert(rs_bad.batch_pending == pending_count);
	igt_assert(rs_good.batch_active == 0);
	igt_assert(rs_good.batch_pending == 2);

	gem_close(fd_bad, h1);
	gem_close(fd_bad, h2);
	gem_close(fd_good, h6);
	gem_close(fd_good, h7);

	h1 = exec_valid(fd_good, 0);
	igt_assert(h1 >= 0);
	gem_close(fd_good, h1);

	close(fd_bad);
	close(fd_good);

	/* Status queries on closed fds must fail. */
	igt_assert(gem_reset_status(fd_bad, 0) < 0);
	igt_assert(gem_reset_status(fd_good, 0) < 0);
}

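/*
 * Same ban scenario as test_ban(), but with the guilty and innocent
 * work submitted through two contexts on a single fd: the ban must
 * stick to the guilty context only.
 */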
static void test_ban_ctx(void)
{
	int h1,h2,h3,h4,h5,h6,h7;
	int ctx_good, ctx_bad;
	int fd;
	int retry = 10;
	int active_count = 0, pending_count = 0;
	struct local_drm_i915_reset_stats rs_bad, rs_good;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	assert_reset_status(fd, 0, RS_NO_ERROR);

	ctx_good = context_create(fd);
	ctx_bad = context_create(fd);

	assert_reset_status(fd, 0, RS_NO_ERROR);
	assert_reset_status(fd, ctx_good, RS_NO_ERROR);
	assert_reset_status(fd, ctx_bad, RS_NO_ERROR);

	h1 = exec_valid(fd, ctx_bad);
	igt_assert(h1 >= 0);
	h5 = exec_valid(fd, ctx_good);
	igt_assert(h5 >= 0);

	assert_reset_status(fd, ctx_good, RS_NO_ERROR);
	assert_reset_status(fd, ctx_bad, RS_NO_ERROR);

	h2 = inject_hang_no_ban_error(fd, ctx_bad);
	igt_assert(h2 >= 0);
	active_count++;
	/* The second hang will be counted as pending for this one */
	pending_count++;

	h6 = exec_valid(fd, ctx_good);
	h7 = exec_valid(fd, ctx_good);

	while (retry--) {
		h3 = inject_hang_no_ban_error(fd, ctx_bad);
		igt_assert(h3 >= 0);
		gem_sync(fd, h3);
		active_count++;

		/* This second hang counts as active */
		assert_reset_status(fd, ctx_bad, RS_BATCH_ACTIVE);

		h4 = exec_valid(fd, ctx_bad);
		if (h4 == -EIO) {
			gem_close(fd, h3);
			break;
		}

		/* This should not happen often, but sometimes the hang
		 * is detected too slowly because we fake it with a
		 * busy-looping batch. Retry in that case. */
		igt_assert(h4 >= 0);
		gem_close(fd, h3);
		gem_close(fd, h4);

		printf("retrying for ban (%d)\n", retry);
	}

	igt_assert(h4 == -EIO);
	assert_reset_status(fd, ctx_bad, RS_BATCH_ACTIVE);

	gem_sync(fd, h7);
	assert_reset_status(fd, ctx_good, RS_BATCH_PENDING);

	igt_assert(gem_reset_stats(fd, ctx_good, &rs_good) == 0);
	igt_assert(gem_reset_stats(fd, ctx_bad, &rs_bad) == 0);

	igt_assert(rs_bad.batch_active == active_count);
	igt_assert(rs_bad.batch_pending == pending_count);
	igt_assert(rs_good.batch_active == 0);
	igt_assert(rs_good.batch_pending == 2);

	gem_close(fd, h1);
	gem_close(fd, h2);
	gem_close(fd, h6);
	gem_close(fd, h7);

	h1 = exec_valid(fd, ctx_good);
	igt_assert(h1 >= 0);
	gem_close(fd, h1);

	igt_assert(context_destroy(fd, ctx_good) == 0);
	igt_assert(context_destroy(fd, ctx_bad) == 0);
	igt_assert(gem_reset_status(fd, ctx_good) < 0);
	igt_assert(gem_reset_status(fd, ctx_bad) < 0);
	igt_assert(exec_valid(fd, ctx_good) < 0);
	igt_assert(exec_valid(fd, ctx_bad) < 0);

	close(fd);
}

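/*
 * A hang in one context must not pollute the reset stats of a
 * completely unrelated context on another fd.
 */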
static void test_unrelated_ctx(void)
{
	int h1,h2;
	int fd1,fd2;
	int ctx_guilty, ctx_unrelated;

	fd1 = drm_open_any();
	fd2 = drm_open_any();
	assert_reset_status(fd1, 0, RS_NO_ERROR);
	assert_reset_status(fd2, 0, RS_NO_ERROR);
	ctx_guilty = context_create(fd1);
	ctx_unrelated = context_create(fd2);

	assert_reset_status(fd1, ctx_guilty, RS_NO_ERROR);
	assert_reset_status(fd2, ctx_unrelated, RS_NO_ERROR);

	h1 = inject_hang(fd1, ctx_guilty);
	igt_assert(h1 >= 0);
	gem_sync(fd1, h1);

	assert_reset_status(fd1, ctx_guilty, RS_BATCH_ACTIVE);
	assert_reset_status(fd2, ctx_unrelated, RS_NO_ERROR);

	h2 = exec_valid(fd2, ctx_unrelated);
	igt_assert(h2 >= 0);
	gem_sync(fd2, h2);

	assert_reset_status(fd1, ctx_guilty, RS_BATCH_ACTIVE);
	assert_reset_status(fd2, ctx_unrelated, RS_NO_ERROR);

	gem_close(fd1, h1);
	gem_close(fd2, h2);

	igt_assert(context_destroy(fd1, ctx_guilty) == 0);
	igt_assert(context_destroy(fd2, ctx_unrelated) == 0);

	close(fd1);
	close(fd2);
}

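/* Read the global reset counter as seen through the given context. */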
static int get_reset_count(int fd, int ctx)
{
	int ret;
	struct local_drm_i915_reset_stats rs;

	ret = gem_reset_stats(fd, ctx, &rs);
	if (ret)
		return ret;

	return rs.reset_count;
}

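/*
 * Destroying a context while its hanging batch is still pending must
 * work, and a second destroy of the same id must fail with -ENOENT.
 */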
static void test_close_pending_ctx(void)
{
	int fd, h;
	uint32_t ctx;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	ctx = context_create(fd);

	assert_reset_status(fd, ctx, RS_NO_ERROR);

	h = inject_hang(fd, ctx);
	igt_assert(h >= 0);

	igt_assert(context_destroy(fd, ctx) == 0);
	igt_assert(context_destroy(fd, ctx) == -ENOENT);

	gem_close(fd, h);
	close(fd);
}

static void test_close_pending(void)
{
	int fd, h;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	assert_reset_status(fd, 0, RS_NO_ERROR);

	h = inject_hang(fd, 0);
	igt_assert(h >= 0);

	gem_close(fd, h);
	close(fd);
}

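/*
 * Submit the same no-op batch on every present ring, optionally in
 * reverse order, so the one batch_obj ends up on multiple ring lists.
 */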
static void exec_noop_on_each_ring(int fd, const bool reverse)
{
	uint32_t batch[2] = {MI_BATCH_BUFFER_END, 0};
	uint32_t handle;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec[1];

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	exec[0].handle = handle;
	exec[0].relocation_count = 0;
	exec[0].relocs_ptr = 0;
	exec[0].alignment = 0;
	exec[0].offset = 0;
	exec[0].flags = 0;
	exec[0].rsvd1 = 0;
	exec[0].rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = 8;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = 0;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	for (unsigned i = 0; i < NUM_RINGS; i++) {
		const struct target_ring *ring;

		ring = reverse ? &rings[NUM_RINGS - 1 - i] : &rings[i];

		if (ring->present(fd)) {
			execbuf.flags = ring->exec;
			do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
		}
	}

	gem_sync(fd, handle);
	gem_close(fd, handle);
}

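/*
 * Race fd teardown against a pending hang: the child schedules the
 * shared no-op batch on every ring and is then killed, dropping its
 * batch_obj references right as the GPU reset clears the ring lists.
 */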
static void test_close_pending_fork(const bool reverse)
{
	int pid;
	int fd, h;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	assert_reset_status(fd, 0, RS_NO_ERROR);

	h = inject_hang(fd, 0);
	igt_assert(h >= 0);

	sleep(1);

	/* Avoid helpers as we need to kill the child
	 * without any extra signal handling on behalf of
	 * lib/drmtest.c
	 */
	pid = fork();
	if (pid == 0) {
		signal(SIGTERM, SIG_DFL);

		const int fd2 = drm_open_any();
		igt_assert(fd2 >= 0);

		/* The crucial component is that we schedule the same noop batch
		 * on each ring. This exercises batch_obj reference counting,
		 * when gpu is reset and ring lists are cleared.
		 */
		exec_noop_on_each_ring(fd2, reverse);

		sleep(1);

		exit(0);
	} else {
		igt_assert(pid > 0);
		sleep(1);

		/* Kill the child to reduce refcounts on
		   batch_objs */
		kill(pid, SIGKILL);

		gem_close(fd, h);
		close(fd);
	}

	/* Then we just wait on hang to happen */
	fd = drm_open_any();
	igt_assert(fd >= 0);

	h = exec_valid(fd, 0);
	igt_assert(h >= 0);

	gem_sync(fd, h);
	gem_close(fd, h);
	close(fd);
}

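/*
 * The global reset counter must increment exactly once per hang, and
 * reading it through the default context requires root.
 */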
static void test_reset_count(const bool create_ctx)
{
	int fd, h, ctx;
	int c1, c2;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	if (create_ctx)
		ctx = context_create(fd);
	else
		ctx = 0;

	assert_reset_status(fd, ctx, RS_NO_ERROR);

	c1 = get_reset_count(fd, ctx);
	igt_assert(c1 >= 0);

	h = inject_hang(fd, ctx);
	igt_assert(h >= 0);
	gem_sync(fd, h);

	assert_reset_status(fd, ctx, RS_BATCH_ACTIVE);
	c2 = get_reset_count(fd, ctx);
	igt_assert(c2 >= 0);
	igt_assert(c2 == (c1 + 1));

	igt_fork(child, 1) {
		igt_drop_root();

		c2 = get_reset_count(fd, ctx);

		if (ctx == 0)
			igt_assert(c2 == -EPERM);
		else
			igt_assert(c2 == 0);
	}

	igt_waitchildren();

	gem_close(fd, h);

	if (create_ctx)
		context_destroy(fd, ctx);

	close(fd);
}

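/*
 * Exercise parameter validation of the reset-stats ioctl: non-zero
 * flags or pad must fail with -EINVAL, an unknown context id with
 * -ENOENT, and querying the default context is root-only.
 */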
static int _test_params(int fd, int ctx, uint32_t flags, uint32_t pad)
{
	struct local_drm_i915_reset_stats rs;
	int ret;

	rs.ctx_id = ctx;
	rs.flags = flags;
	rs.reset_count = rand();
	rs.batch_active = rand();
	rs.batch_pending = rand();
	rs.pad = pad;

	do {
		ret = ioctl(fd, GET_RESET_STATS_IOCTL, &rs);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	if (ret < 0)
		return -errno;

	return 0;
}

typedef enum { root = 0, user } cap_t;

static void _check_param_ctx(const int fd, const int ctx, const cap_t cap)
{
	const uint32_t bad = rand() + 1;

	if (ctx == 0) {
		if (cap == root)
			igt_assert(_test_params(fd, ctx, 0, 0) == 0);
		else
			igt_assert(_test_params(fd, ctx, 0, 0) == -EPERM);
	}

	igt_assert(_test_params(fd, ctx, 0, bad) == -EINVAL);
	igt_assert(_test_params(fd, ctx, bad, 0) == -EINVAL);
	igt_assert(_test_params(fd, ctx, bad, bad) == -EINVAL);
}

static void check_params(const int fd, const int ctx, cap_t cap)
{
	igt_assert(ioctl(fd, GET_RESET_STATS_IOCTL, 0) == -1);
	igt_assert(_test_params(fd, 0xbadbad, 0, 0) == -ENOENT);

	_check_param_ctx(fd, ctx, cap);
}

static void _test_param(const int fd, const int ctx)
{
	check_params(fd, ctx, root);

	igt_fork(child, 1) {
		check_params(fd, ctx, root);

		igt_drop_root();

		check_params(fd, ctx, user);
	}

	check_params(fd, ctx, root);

	igt_waitchildren();
}

static void test_params_ctx(void)
{
	int fd, ctx;

	fd = drm_open_any();
	igt_assert(fd >= 0);
	ctx = context_create(fd);

	_test_param(fd, ctx);

	close(fd);
}

static void test_params(void)
{
	int fd;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	_test_param(fd, 0);

	close(fd);
}

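/*
 * Feature detection: probe the context-create and reset-stats ioctls
 * so the test can skip gracefully on kernels without them.
 */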
static bool gem_has_hw_contexts(int fd)
{
	struct local_drm_i915_gem_context_create create;
	int ret;

	memset(&create, 0, sizeof(create));
	ret = drmIoctl(fd, CONTEXT_CREATE_IOCTL, &create);
	if (ret == 0) {
		/* The create/destroy payloads share the same layout. */
		drmIoctl(fd, CONTEXT_DESTROY_IOCTL, &create);
		return true;
	}

	return false;
}

static bool gem_has_reset_stats(int fd)
{
	struct local_drm_i915_reset_stats rs;
	int ret;

	/* Carefully set flags and pad to zero, otherwise
	 * we get -EINVAL even on supporting kernels.
	 */
	memset(&rs, 0, sizeof(rs));

	ret = drmIoctl(fd, GET_RESET_STATS_IOCTL, &rs);
	if (ret == 0)
		return true;

	/* If we get EPERM, we have support but did not
	 * have the required privileges for the default context.
	 */
	if (ret == -1 && errno == EPERM)
		return true;

	return false;
}

#define RING_HAS_CONTEXTS (current_ring->contexts(current_ring))
#define RUN_CTX_TEST(...) do { igt_skip_on(RING_HAS_CONTEXTS == false); __VA_ARGS__; } while (0)

igt_main
{
	int fd = -1;

	igt_skip_on_simulation();

	igt_fixture {
		bool has_reset_stats;

		fd = drm_open_any();
		devid = intel_get_drm_devid(fd);

		hw_contexts = gem_has_hw_contexts(fd);
		has_reset_stats = gem_has_reset_stats(fd);

		igt_require_f(has_reset_stats,
			      "No reset stats ioctl support. Too old kernel?\n");
	}

	igt_subtest("params")
		test_params();

	for (int i = 0; i < NUM_RINGS; i++) {
		const char *name;

		current_ring = &rings[i];
		name = current_ring->name;

		igt_fixture {
			gem_require_ring(fd, current_ring->exec);

			igt_require_f(intel_gen(devid) >= 4,
				      "gen %d doesn't support reset\n", intel_gen(devid));
		}

		igt_subtest_f("params-ctx-%s", name)
			RUN_CTX_TEST(test_params_ctx());

		igt_subtest_f("reset-stats-%s", name)
			test_rs(4, 1, 0);

		igt_subtest_f("reset-stats-ctx-%s", name)
			RUN_CTX_TEST(test_rs_ctx(4, 4, 1, 2));

		igt_subtest_f("ban-%s", name)
			test_ban();

		igt_subtest_f("ban-ctx-%s", name)
			RUN_CTX_TEST(test_ban_ctx());

		igt_subtest_f("reset-count-%s", name)
			test_reset_count(false);

		igt_subtest_f("reset-count-ctx-%s", name)
			RUN_CTX_TEST(test_reset_count(true));

		igt_subtest_f("unrelated-ctx-%s", name)
			RUN_CTX_TEST(test_unrelated_ctx());

		igt_subtest_f("close-pending-%s", name) {
			test_close_pending();
			gem_quiescent_gpu(fd);
		}

		igt_subtest_f("close-pending-ctx-%s", name) {
			RUN_CTX_TEST(test_close_pending_ctx());
			gem_quiescent_gpu(fd);
		}

		igt_subtest_f("close-pending-fork-%s", name) {
			test_close_pending_fork(true);
			test_close_pending_fork(false);
			gem_quiescent_gpu(fd);
		}
	}

	igt_fixture
		close(fd);
}