/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ioctl.h>

#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_gpu_tools.h"
#include "rendercopy.h"
#include "igt_debugfs.h"
#define RS_NO_ERROR 0
#define RS_BATCH_ACTIVE (1 << 0)
#define RS_BATCH_PENDING (1 << 1)
#define RS_UNKNOWN (1 << 2)
/*
 * Local copies of the kernel's context/reset-stats ioctl argument structs;
 * the field layout follows the i915 uapi (fields inferred from their uses
 * below).
 */
struct local_drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;
	__u32 reset_count;
	__u32 batch_active;
	__u32 batch_pending;
	__u32 pad;
};

struct local_drm_i915_gem_context_create {
	__u32 ctx_id;
	__u32 pad;
};

struct local_drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

#define CONTEXT_CREATE_IOCTL DRM_IOWR(DRM_COMMAND_BASE + 0x2d, struct local_drm_i915_gem_context_create)
#define CONTEXT_DESTROY_IOCTL DRM_IOWR(DRM_COMMAND_BASE + 0x2e, struct local_drm_i915_gem_context_destroy)
#define GET_RESET_STATS_IOCTL DRM_IOWR(DRM_COMMAND_BASE + 0x32, struct local_drm_i915_reset_stats)
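
/*
 * Illustrative sketch only (not part of the original test): the helpers
 * below reduce to plain ioctl calls on the structures above, e.g.
 *
 *	struct local_drm_i915_gem_context_create create = { .ctx_id = 0 };
 *	drmIoctl(fd, CONTEXT_CREATE_IOCTL, &create);	// fills create.ctx_id
 *
 *	struct local_drm_i915_reset_stats rs = { .ctx_id = create.ctx_id };
 *	drmIoctl(fd, GET_RESET_STATS_IOCTL, &rs);	// fills reset_count etc.
 */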
static igt_debugfs_t dfs;

#define LOCAL_I915_EXEC_VEBOX (4 << 0)
static bool gem_has_render(int fd)
{
	/* the render ring is always present */
	return true;
}

static bool has_context(const struct target_ring *ring);
static const struct target_ring {
	uint32_t exec;
	bool (*present)(int fd);
	bool (*contexts)(const struct target_ring *ring);
	const char *name;
} rings[] = {
	{ I915_EXEC_RENDER, gem_has_render, has_context, "render" },
	{ I915_EXEC_BLT, gem_has_blt, has_context, "blt" },
	{ I915_EXEC_BSD, gem_has_bsd, has_context, "bsd" },
	{ LOCAL_I915_EXEC_VEBOX, gem_has_vebox, has_context, "vebox" },
};
static bool has_context(const struct target_ring *ring)
{
	if (ring->exec == I915_EXEC_RENDER)
		return true;

	return false;
}
#define NUM_RINGS (sizeof(rings)/sizeof(struct target_ring))
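
/*
 * current_ring selects which ring the per-ring subtests exercise; the
 * subtest loop at the end of the file walks the rings[] table and re-runs
 * every subtest once per ring.
 */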
static const struct target_ring *current_ring;
static uint32_t context_create(int fd)
{
	struct local_drm_i915_gem_context_create create;
	int ret;

	create.ctx_id = rand();

	ret = drmIoctl(fd, CONTEXT_CREATE_IOCTL, &create);
	igt_assert(ret == 0);

	return create.ctx_id;
}
static int context_destroy(int fd, uint32_t ctx_id)
{
	struct local_drm_i915_gem_context_destroy destroy;
	int ret;

	destroy.ctx_id = ctx_id;
	destroy.pad = rand();

	ret = drmIoctl(fd, CONTEXT_DESTROY_IOCTL, &destroy);
	if (ret != 0)
		return -errno;

	return 0;
}
static int gem_reset_stats(int fd, int ctx_id,
			   struct local_drm_i915_reset_stats *rs)
{
	int ret;

	rs->ctx_id = ctx_id;
	rs->flags = 0;
	rs->reset_count = rand();
	rs->batch_active = rand();
	rs->batch_pending = rand();
	rs->pad = 0;

	do {
		ret = ioctl(fd, GET_RESET_STATS_IOCTL, rs);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	if (ret < 0)
		return -errno;

	return 0;
}
static int gem_reset_status(int fd, int ctx_id)
{
	int ret;
	struct local_drm_i915_reset_stats rs;

	ret = gem_reset_stats(fd, ctx_id, &rs);
	if (ret)
		return ret;

	if (rs.batch_active)
		return RS_BATCH_ACTIVE;
	if (rs.batch_pending)
		return RS_BATCH_PENDING;

	return RS_NO_ERROR;
}
static int gem_exec(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	int ret;

	ret = ioctl(fd,
		    DRM_IOCTL_I915_GEM_EXECBUFFER2,
		    execbuf);

	if (ret < 0)
		return -errno;

	return 0;
}
static int exec_valid_ring(int fd, int ctx, int ring)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	int ret;

	uint32_t buf[2] = { MI_BATCH_BUFFER_END, 0 };

	exec.handle = gem_create(fd, 4096);
	gem_write(fd, exec.handle, 0, buf, sizeof(buf));
	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = sizeof(buf);
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, ctx);
	execbuf.rsvd2 = 0;

	ret = gem_exec(fd, &execbuf);
	if (ret < 0)
		return ret;

	return exec.handle;
}

static int exec_valid(int fd, int ctx)
{
	return exec_valid_ring(fd, ctx, current_ring->exec);
}
static void stop_rings(const int mask)
{
	int fd;
	char buf[5];

	igt_assert((mask & ~((1 << NUM_RINGS) - 1)) == 0);
	igt_assert(snprintf(buf, sizeof(buf), "0x%02x", mask) == 4);
	fd = igt_debugfs_open(&dfs, "i915_ring_stop", O_WRONLY);
	igt_assert(fd >= 0);

	igt_assert(write(fd, buf, 4) == 4);
	close(fd);
}
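
/*
 * For context: writing a ring mask to the i915_ring_stop debugfs file asks
 * the driver to stop the selected rings from advancing, so a batch that
 * loops on itself (see inject_hang_ring() below) is eventually declared
 * hung by hangcheck instead of spinning forever.
 */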
#define BUFSIZE (4 * 1024)
#define ITEMS (BUFSIZE >> 2)
static int ring_to_mask(int ring)
{
	for (unsigned i = 0; i < NUM_RINGS; i++) {
		const struct target_ring *r = &rings[i];

		if (r->exec == ring)
			return (1 << i);
	}

	igt_assert(0);

	return -1;
}
static int inject_hang_ring(int fd, int ctx, int ring)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	uint64_t gtt_off;
	uint32_t *buf;
	int roff, i;
	unsigned cmd_len = 2;

	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		cmd_len = 3;

	buf = malloc(BUFSIZE);
	igt_assert(buf != NULL);

	buf[0] = MI_BATCH_BUFFER_END;
	buf[1] = MI_NOOP;

	exec.handle = gem_create(fd, BUFSIZE);
	gem_write(fd, exec.handle, 0, buf, BUFSIZE);
	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = BUFSIZE;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, ctx);
	execbuf.rsvd2 = 0;

	igt_assert(gem_exec(fd, &execbuf) == 0);

	gtt_off = exec.offset;

	for (i = 0; i < ITEMS; i++)
		buf[i] = MI_NOOP;

	roff = random() % (ITEMS - cmd_len);
	buf[roff] = MI_BATCH_BUFFER_START | (cmd_len - 2);
	buf[roff + 1] = (gtt_off & 0xfffffffc) + (roff << 2);
	if (cmd_len == 3)
		buf[roff + 2] = gtt_off & 0xffffffff00000000ull;
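
	/*
	 * The dwords written above form an MI_BATCH_BUFFER_START whose target
	 * is its own GTT address (gtt_off + (roff << 2)), so the batch
	 * branches to itself and spins until stop_rings() below lets
	 * hangcheck declare a hang. On gen8+ the command is three dwords
	 * long, with the extra dword meant to carry the upper 32 bits of the
	 * 48-bit target address.
	 */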
	printf("loop injected at 0x%lx (off 0x%x, bo_start 0x%lx, bo_end 0x%lx)\n",
	       (long unsigned int)((roff << 2) + gtt_off),
	       roff << 2, (long unsigned int)gtt_off,
	       (long unsigned int)(gtt_off + BUFSIZE - 1));

	gem_write(fd, exec.handle, 0, buf, BUFSIZE);

	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = BUFSIZE;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, ctx);
	execbuf.rsvd2 = 0;

	igt_assert(gem_exec(fd, &execbuf) == 0);

	igt_assert(gtt_off == exec.offset);

	free(buf);

	stop_rings(ring_to_mask(ring));

	return exec.handle;
}

static int inject_hang(int fd, int ctx)
{
	return inject_hang_ring(fd, ctx, current_ring->exec);
}
static int _assert_reset_status(int fd, int ctx, int status)
{
	int rs;

	rs = gem_reset_status(fd, ctx);
	if (rs < 0) {
		printf("reset status for %d ctx %d returned %d\n",
		       fd, ctx, rs);
		return rs;
	}

	if (rs != status) {
		printf("%d:%d reset status %d differs from assumed %d\n",
		       fd, ctx, rs, status);
		return 1;
	}

	return 0;
}

#define assert_reset_status(fd, ctx, status) \
	igt_assert(_assert_reset_status(fd, ctx, status) == 0)
static void test_rs(int num_fds, int hang_index, int rs_assumed_no_hang)
{
	int i;
	int fd[MAX_FD], h[MAX_FD];

	igt_assert(num_fds <= MAX_FD);
	igt_assert(hang_index < MAX_FD);

	for (i = 0; i < num_fds; i++) {
		fd[i] = drm_open_any();
		igt_assert(fd[i] >= 0);
	}

	for (i = 0; i < num_fds; i++)
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

	for (i = 0; i < num_fds; i++) {
		if (i == hang_index)
			h[i] = inject_hang(fd[i], 0);
		else
			h[i] = exec_valid(fd[i], 0);
	}

	gem_sync(fd[num_fds - 1], h[num_fds - 1]);

	for (i = 0; i < num_fds; i++) {
		if (hang_index < 0) {
			assert_reset_status(fd[i], 0, rs_assumed_no_hang);
			continue;
		}

		if (i < hang_index)
			assert_reset_status(fd[i], 0, RS_NO_ERROR);
		if (i == hang_index)
			assert_reset_status(fd[i], 0, RS_BATCH_ACTIVE);
		if (i > hang_index)
			assert_reset_status(fd[i], 0, RS_BATCH_PENDING);
	}

	for (i = 0; i < num_fds; i++) {
		gem_close(fd[i], h[i]);
		close(fd[i]);
	}
}
static void test_rs_ctx(int num_fds, int num_ctx, int hang_index,
			int hang_context)
{
	int i, j;
	int fd[MAX_FD];
	int h[MAX_FD][MAX_CTX];
	int ctx[MAX_FD][MAX_CTX];

	igt_assert(num_fds <= MAX_FD);
	igt_assert(hang_index < MAX_FD);

	igt_assert(num_ctx <= MAX_CTX);
	igt_assert(hang_context < MAX_CTX);

	test_rs(num_fds, -1, RS_NO_ERROR);

	for (i = 0; i < num_fds; i++) {
		fd[i] = drm_open_any();
		igt_assert(fd[i] >= 0);
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

		for (j = 0; j < num_ctx; j++) {
			ctx[i][j] = context_create(fd[i]);
		}

		assert_reset_status(fd[i], 0, RS_NO_ERROR);
	}

	for (i = 0; i < num_fds; i++) {
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

		for (j = 0; j < num_ctx; j++)
			assert_reset_status(fd[i], ctx[i][j], RS_NO_ERROR);

		assert_reset_status(fd[i], 0, RS_NO_ERROR);
	}

	for (i = 0; i < num_fds; i++) {
		for (j = 0; j < num_ctx; j++) {
			if (i == hang_index && j == hang_context)
				h[i][j] = inject_hang(fd[i], ctx[i][j]);
			else
				h[i][j] = exec_valid(fd[i], ctx[i][j]);
		}
	}

	/* wait for the last batch we submitted (a bo handle, not the ctx id) */
	gem_sync(fd[num_fds - 1], h[num_fds - 1][num_ctx - 1]);

	for (i = 0; i < num_fds; i++)
		assert_reset_status(fd[i], 0, RS_NO_ERROR);

	for (i = 0; i < num_fds; i++) {
		for (j = 0; j < num_ctx; j++) {
			if (i < hang_index)
				assert_reset_status(fd[i], ctx[i][j], RS_NO_ERROR);
			if (i == hang_index && j < hang_context)
				assert_reset_status(fd[i], ctx[i][j], RS_NO_ERROR);
			if (i == hang_index && j == hang_context)
				assert_reset_status(fd[i], ctx[i][j],
						    RS_BATCH_ACTIVE);
			if (i == hang_index && j > hang_context)
				assert_reset_status(fd[i], ctx[i][j],
						    RS_BATCH_PENDING);
			if (i > hang_index)
				assert_reset_status(fd[i], ctx[i][j],
						    RS_BATCH_PENDING);
		}
	}

	for (i = 0; i < num_fds; i++) {
		for (j = 0; j < num_ctx; j++) {
			gem_close(fd[i], h[i][j]);
			igt_assert(context_destroy(fd[i], ctx[i][j]) == 0);
		}

		assert_reset_status(fd[i], 0, RS_NO_ERROR);

		close(fd[i]);
	}
}
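
/*
 * test_ban: the "bad" fd keeps submitting hanging batches while the "good"
 * fd only submits valid work. After repeated hangs the kernel is expected to
 * ban the bad client (its execbuf returns -EIO), while the good client's
 * batches are merely counted as pending.
 */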
static void test_ban(void)
{
	int h1, h2, h3, h4, h5, h6, h7;
	int fd_bad, fd_good;
	int active_count = 0, pending_count = 0;
	struct local_drm_i915_reset_stats rs_bad, rs_good;

	fd_bad = drm_open_any();
	igt_assert(fd_bad >= 0);

	fd_good = drm_open_any();
	igt_assert(fd_good >= 0);

	assert_reset_status(fd_bad, 0, RS_NO_ERROR);
	assert_reset_status(fd_good, 0, RS_NO_ERROR);

	h1 = exec_valid(fd_bad, 0);
	h5 = exec_valid(fd_good, 0);

	assert_reset_status(fd_bad, 0, RS_NO_ERROR);
	assert_reset_status(fd_good, 0, RS_NO_ERROR);

	h2 = inject_hang(fd_bad, 0);

	/* Second hang will be pending for this */

	h6 = exec_valid(fd_good, 0);
	h7 = exec_valid(fd_good, 0);

	h3 = inject_hang(fd_bad, 0);

	gem_sync(fd_bad, h3);

	/* This second hang will count as pending */
	assert_reset_status(fd_bad, 0, RS_BATCH_ACTIVE);

	h4 = exec_valid(fd_bad, 0);

	gem_close(fd_bad, h3);

	/* Should not happen often, but the hang is sometimes detected too
	 * slowly because we fake it with a batch that loops on itself */
	gem_close(fd_bad, h3);
	gem_close(fd_bad, h4);

	printf("retrying for ban (%d)\n", retry);

	igt_assert(h4 == -EIO);
	assert_reset_status(fd_bad, 0, RS_BATCH_ACTIVE);

	gem_sync(fd_good, h7);
	assert_reset_status(fd_good, 0, RS_BATCH_PENDING);

	igt_assert(gem_reset_stats(fd_good, 0, &rs_good) == 0);
	igt_assert(gem_reset_stats(fd_bad, 0, &rs_bad) == 0);

	igt_assert(rs_bad.batch_active == active_count);
	igt_assert(rs_bad.batch_pending == pending_count);
	igt_assert(rs_good.batch_active == 0);
	igt_assert(rs_good.batch_pending == 2);

	gem_close(fd_bad, h1);
	gem_close(fd_bad, h2);
	gem_close(fd_good, h6);
	gem_close(fd_good, h7);

	h1 = exec_valid(fd_good, 0);
	gem_close(fd_good, h1);

	close(fd_bad);
	close(fd_good);

	igt_assert(gem_reset_status(fd_bad, 0) < 0);
	igt_assert(gem_reset_status(fd_good, 0) < 0);
}
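
/*
 * test_ban_ctx: same flow as test_ban(), but with a guilty and an innocent
 * context on a single fd instead of two separate fds.
 */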
static void test_ban_ctx(void)
{
	int h1, h2, h3, h4, h5, h6, h7;
	int ctx_good, ctx_bad;
	int fd;
	int active_count = 0, pending_count = 0;
	struct local_drm_i915_reset_stats rs_bad, rs_good;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	assert_reset_status(fd, 0, RS_NO_ERROR);

	ctx_good = context_create(fd);
	ctx_bad = context_create(fd);

	assert_reset_status(fd, 0, RS_NO_ERROR);
	assert_reset_status(fd, ctx_good, RS_NO_ERROR);
	assert_reset_status(fd, ctx_bad, RS_NO_ERROR);

	h1 = exec_valid(fd, ctx_bad);
	h5 = exec_valid(fd, ctx_good);

	assert_reset_status(fd, ctx_good, RS_NO_ERROR);
	assert_reset_status(fd, ctx_bad, RS_NO_ERROR);

	h2 = inject_hang(fd, ctx_bad);

	/* Second hang will be pending for this */

	h6 = exec_valid(fd, ctx_good);
	h7 = exec_valid(fd, ctx_good);

	h3 = inject_hang(fd, ctx_bad);

	/* This second hang will count as pending */
	assert_reset_status(fd, ctx_bad, RS_BATCH_ACTIVE);

	h4 = exec_valid(fd, ctx_bad);

	/* Should not happen often, but the hang is sometimes detected too
	 * slowly because we fake it with a batch that loops on itself */

	printf("retrying for ban (%d)\n", retry);

	igt_assert(h4 == -EIO);
	assert_reset_status(fd, ctx_bad, RS_BATCH_ACTIVE);

	assert_reset_status(fd, ctx_good, RS_BATCH_PENDING);

	igt_assert(gem_reset_stats(fd, ctx_good, &rs_good) == 0);
	igt_assert(gem_reset_stats(fd, ctx_bad, &rs_bad) == 0);

	igt_assert(rs_bad.batch_active == active_count);
	igt_assert(rs_bad.batch_pending == pending_count);
	igt_assert(rs_good.batch_active == 0);
	igt_assert(rs_good.batch_pending == 2);

	h1 = exec_valid(fd, ctx_good);
	gem_close(fd, h1);

	igt_assert(context_destroy(fd, ctx_good) == 0);
	igt_assert(context_destroy(fd, ctx_bad) == 0);
	igt_assert(gem_reset_status(fd, ctx_good) < 0);
	igt_assert(gem_reset_status(fd, ctx_bad) < 0);
	igt_assert(exec_valid(fd, ctx_good) < 0);
	igt_assert(exec_valid(fd, ctx_bad) < 0);

	close(fd);
}
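
/*
 * test_unrelated_ctx: a hang in one client's context must be reported only
 * against that context; a context on a second, unrelated fd keeps reporting
 * RS_NO_ERROR before and after the hang.
 */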
static void test_unrelated_ctx(void)
{
	int h1, h2;
	int fd1, fd2;
	int ctx_guilty, ctx_unrelated;

	fd1 = drm_open_any();
	fd2 = drm_open_any();
	assert_reset_status(fd1, 0, RS_NO_ERROR);
	assert_reset_status(fd2, 0, RS_NO_ERROR);
	ctx_guilty = context_create(fd1);
	ctx_unrelated = context_create(fd2);

	assert_reset_status(fd1, ctx_guilty, RS_NO_ERROR);
	assert_reset_status(fd2, ctx_unrelated, RS_NO_ERROR);

	h1 = inject_hang(fd1, ctx_guilty);
	igt_assert(h1 >= 0);
	gem_sync(fd1, h1);

	assert_reset_status(fd1, ctx_guilty, RS_BATCH_ACTIVE);
	assert_reset_status(fd2, ctx_unrelated, RS_NO_ERROR);

	h2 = exec_valid(fd2, ctx_unrelated);
	igt_assert(h2 >= 0);
	gem_sync(fd2, h2);

	assert_reset_status(fd1, ctx_guilty, RS_BATCH_ACTIVE);
	assert_reset_status(fd2, ctx_unrelated, RS_NO_ERROR);

	gem_close(fd1, h1);
	gem_close(fd2, h2);

	igt_assert(context_destroy(fd1, ctx_guilty) == 0);
	igt_assert(context_destroy(fd2, ctx_unrelated) == 0);

	close(fd1);
	close(fd2);
}
static int get_reset_count(int fd, int ctx)
{
	int ret;
	struct local_drm_i915_reset_stats rs;

	ret = gem_reset_stats(fd, ctx, &rs);
	if (ret)
		return ret;

	return rs.reset_count;
}
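
/*
 * test_close_pending_ctx: destroy a context while its hanging batch is still
 * pending; a second destroy of the same context must fail with -ENOENT, and
 * the subsequent reset has to cope with the already-destroyed context.
 */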
static void test_close_pending_ctx(void)
{
	int fd, h;
	uint32_t ctx;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	ctx = context_create(fd);

	assert_reset_status(fd, ctx, RS_NO_ERROR);

	h = inject_hang(fd, ctx);
	igt_assert(h >= 0);

	igt_assert(context_destroy(fd, ctx) == 0);
	igt_assert(context_destroy(fd, ctx) == -ENOENT);

	gem_close(fd, h);
	close(fd);
}
static void test_close_pending(void)
{
	int fd, h;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	assert_reset_status(fd, 0, RS_NO_ERROR);

	h = inject_hang(fd, 0);
	igt_assert(h >= 0);

	gem_close(fd, h);
	close(fd);
}
static void exec_noop_on_each_ring(int fd, const bool reverse)
{
	uint32_t batch[2] = {MI_BATCH_BUFFER_END, 0};
	uint32_t handle;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec[1];

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	exec[0].handle = handle;
	exec[0].relocation_count = 0;
	exec[0].relocs_ptr = 0;
	exec[0].alignment = 0;
	exec[0].offset = 0;
	exec[0].flags = 0;
	exec[0].rsvd1 = 0;
	exec[0].rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = 8;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	for (unsigned i = 0; i < NUM_RINGS; i++) {
		const struct target_ring *ring;

		ring = reverse ? &rings[NUM_RINGS - 1 - i] : &rings[i];

		if (ring->present(fd)) {
			execbuf.flags = ring->exec;
			do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
		}
	}

	gem_sync(fd, handle);
	gem_close(fd, handle);
}
static void test_close_pending_fork(const bool reverse)
{
	int pid;
	int fd, h;

	fd = drm_open_any();
	igt_assert(fd >= 0);

	assert_reset_status(fd, 0, RS_NO_ERROR);

	h = inject_hang(fd, 0);
	igt_assert(h >= 0);

	/* Avoid helpers as we need to kill the child
	 * without any extra signal handling done on its behalf */
	pid = fork();
	if (pid == 0) {
		const int fd2 = drm_open_any();
		igt_assert(fd2 >= 0);

		/* The crucial component is that we schedule the same noop batch
		 * on each ring. This exercises batch_obj reference counting
		 * when the gpu is reset and the ring lists are cleared.
		 */
		exec_noop_on_each_ring(fd2, reverse);

		close(fd2);
		exit(0);
	}

	/* Kill the child to reduce refcounts on
	 * the shared batch objects */
	kill(pid, SIGKILL);

	/* Then we just wait on the hang to happen */
	h = exec_valid(fd, 0);
	igt_assert(h >= 0);

	gem_close(fd, h);
	close(fd);
}
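
/*
 * test_reset_count: the reset_count reported by GET_RESET_STATS should grow
 * by exactly one after an injected hang, and reading it without root
 * privileges is expected to fail with -EPERM.
 */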
static void test_reset_count(const bool create_ctx)
{
	int fd, h, ctx;
	int c1, c2;

	fd = drm_open_any();
	igt_assert(fd >= 0);
	if (create_ctx)
		ctx = context_create(fd);
	else
		ctx = 0;

	assert_reset_status(fd, ctx, RS_NO_ERROR);

	c1 = get_reset_count(fd, ctx);

	h = inject_hang(fd, ctx);
	igt_assert(h >= 0);
	gem_sync(fd, h);

	assert_reset_status(fd, ctx, RS_BATCH_ACTIVE);
	c2 = get_reset_count(fd, ctx);

	igt_assert(c2 == (c1 + 1));

	/* the count is re-read after dropping root privileges */
		c2 = get_reset_count(fd, ctx);

		igt_assert(c2 == -EPERM);

	gem_close(fd, h);

	if (create_ctx)
		context_destroy(fd, ctx);

	close(fd);
}
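
/*
 * Parameter checking for GET_RESET_STATS: nonzero flags or pad must be
 * rejected with -EINVAL, an unknown context id with -ENOENT, and an
 * unprivileged query with -EPERM.
 */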
static int _test_params(int fd, int ctx, uint32_t flags, uint32_t pad)
{
	struct local_drm_i915_reset_stats rs;
	int ret;

	rs.ctx_id = ctx;
	rs.flags = flags;
	rs.reset_count = rand();
	rs.batch_active = rand();
	rs.batch_pending = rand();
	rs.pad = pad;

	do {
		ret = ioctl(fd, GET_RESET_STATS_IOCTL, &rs);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	if (ret < 0)
		return -errno;

	return 0;
}

typedef enum { root = 0, user } cap_t;

static void test_param_ctx(const int fd, const int ctx, const cap_t cap)
{
	const uint32_t bad = rand() + 1;

	if (cap == root) {
		igt_assert(_test_params(fd, ctx, 0, 0) == 0);
	} else {
		igt_assert(_test_params(fd, ctx, 0, 0) == -EPERM);
	}

	igt_assert(_test_params(fd, ctx, 0, bad) == -EINVAL);
	igt_assert(_test_params(fd, ctx, bad, 0) == -EINVAL);
	igt_assert(_test_params(fd, ctx, bad, bad) == -EINVAL);
}

static void check_params(const int fd, const int ctx, cap_t cap)
{
	igt_assert(ioctl(fd, GET_RESET_STATS_IOCTL, 0) == -1);
	igt_assert(_test_params(fd, 0xbadbad, 0, 0) == -ENOENT);

	test_param_ctx(fd, 0, cap);
	test_param_ctx(fd, ctx, cap);
}

static void _test_param(const int fd, const int ctx)
{
	check_params(fd, ctx, root);

	igt_fork(child, 1) {
		check_params(fd, ctx, root);

		igt_drop_root();

		check_params(fd, ctx, user);
	}

	check_params(fd, ctx, root);

	igt_waitchildren();
}

static void test_params(void)
{
	int fd, ctx;

	fd = drm_open_any();
	igt_assert(fd >= 0);
	ctx = context_create(fd);

	_test_param(fd, ctx);

	close(fd);
}

#define RING_HAS_CONTEXTS current_ring->contexts(current_ring)
#define RUN_CTX_TEST(...) do { igt_skip_on(RING_HAS_CONTEXTS == false); __VA_ARGS__; } while (0)
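
/*
 * The context-specific subtests below are wrapped in RUN_CTX_TEST so that
 * they are skipped on rings without hardware context support.
 */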
	struct local_drm_i915_gem_context_create create;
	uint32_t devid;
	int fd;
	int ret;

	igt_skip_on_simulation();

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	igt_require_f(intel_gen(devid) >= 4,
		      "Architecture %d too old\n", intel_gen(devid));

	ret = drmIoctl(fd, CONTEXT_CREATE_IOCTL, &create);
	igt_skip_on_f(ret != 0 && (errno == ENODEV || errno == EINVAL),
		      "Kernel is too old, or contexts not supported: %s\n",
		      strerror(errno));

	igt_debugfs_init(&dfs);

	igt_subtest("params")
		test_params();

	for (int i = 0; i < NUM_RINGS; i++) {
		const char *name;

		current_ring = &rings[i];
		name = current_ring->name;

		gem_require_ring(fd, current_ring->exec);

		igt_subtest_f("reset-stats-%s", name)

		igt_subtest_f("reset-stats-ctx-%s", name)
			RUN_CTX_TEST(test_rs_ctx(4, 4, 1, 2));

		igt_subtest_f("ban-%s", name)
			test_ban();

		igt_subtest_f("ban-ctx-%s", name)
			RUN_CTX_TEST(test_ban_ctx());

		igt_subtest_f("reset-count-%s", name)
			test_reset_count(false);

		igt_subtest_f("reset-count-ctx-%s", name)
			RUN_CTX_TEST(test_reset_count(true));

		igt_subtest_f("unrelated-ctx-%s", name)
			RUN_CTX_TEST(test_unrelated_ctx());

		igt_subtest_f("close-pending-%s", name) {
			test_close_pending();
			gem_quiescent_gpu(fd);
		}

		igt_subtest_f("close-pending-ctx-%s", name) {
			RUN_CTX_TEST(test_close_pending_ctx());
			gem_quiescent_gpu(fd);
		}

		igt_subtest_f("close-pending-fork-%s", name) {
			test_close_pending_fork(true);
			test_close_pending_fork(false);